/*
 *
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */


/*
 * Job Scheduler: Completely Fair Policy Implementation
 */

#include <mali_kbase.h>
#include <mali_kbase_jm.h>
#include <mali_kbase_js.h>
#include <mali_kbase_js_policy_cfs.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
#include <linux/sched/rt.h>
#endif

/**
 * Define for when dumping is enabled.
 * This should not be based on the instrumentation level, because whether
 * dumping is enabled at a particular level is up to the integrator. It is
 * used for now, however, as the alternative would require the cinstr headers.
 */
#define CINSTR_DUMPING_ENABLED (2 == MALI_INSTRUMENTATION_LEVEL)

/** Fixed point constants used for runtime weight calculations */
#define WEIGHT_FIXEDPOINT_SHIFT 10
#define WEIGHT_TABLE_SIZE       40
#define WEIGHT_0_NICE           (WEIGHT_TABLE_SIZE/2)
#define WEIGHT_0_VAL            (1 << WEIGHT_FIXEDPOINT_SHIFT)

#define LOOKUP_VARIANT_MASK ((1u<<KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS) - 1u)

#define PROCESS_PRIORITY_MIN (-20)
#define PROCESS_PRIORITY_MAX  (19)

/** Core requirements that all the variants support */
#define JS_CORE_REQ_ALL_OTHERS \
        (BASE_JD_REQ_CF | BASE_JD_REQ_V | BASE_JD_REQ_PERMON | BASE_JD_REQ_EXTERNAL_RESOURCES | BASEP_JD_REQ_EVENT_NEVER)

/** Context requirements that all the variants support */

/* In the HW issue 8987 workaround, restrict Compute-only contexts and Compute
 * jobs onto job slot[2], which ensures their affinity does not intersect GLES
 * jobs */
#define JS_CTX_REQ_ALL_OTHERS_8987 \
        (KBASE_CTX_FLAG_PRIVILEGED)
#define JS_CORE_REQ_COMPUTE_SLOT_8987 \
        (BASE_JD_REQ_CS)
#define JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 \
        (BASE_JD_REQ_ONLY_COMPUTE)

/* Otherwise, compute-only contexts/compute jobs can use any job slot */
#define JS_CTX_REQ_ALL_OTHERS \
        (KBASE_CTX_FLAG_PRIVILEGED | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE)
#define JS_CORE_REQ_COMPUTE_SLOT \
        (BASE_JD_REQ_CS | BASE_JD_REQ_ONLY_COMPUTE)

/* core_req variants are ordered least restrictive first, so that the
 * algorithm in cached_variant_idx_init picks the least restrictive variant
 * for each job. Note that the coherent_group requirement is added to all CS
 * variants, as the selection of job-slot does not depend on the coherency
 * requirement. */
static const kbasep_atom_req core_req_variants[] = {
        {
         /* 0: Fragment variant */
         (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_FS_AFBC |
                                                BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS),
         0},
        {
         /* 1: Compute variant, can use all coregroups */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT),
         (JS_CTX_REQ_ALL_OTHERS),
         0},
        {
         /* 2: Compute variant, uses only coherent coregroups */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS),
         0},
        {
         /* 3: Compute variant, might only use coherent coregroup, and must use tiling */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_T),
         (JS_CTX_REQ_ALL_OTHERS),
         0},

        {
         /* 4: Unused */
         0,
         0,
         0},

        {
         /* 5: Compute variant for specific-coherent-group, targeting CoreGroup 0 */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS),
         0                      /* device_nr */
         },
        {
         /* 6: Compute variant for specific-coherent-group, targeting CoreGroup 1 */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS),
         1                      /* device_nr */
         },

        /* Unused core_req variants, to bring the total up to a power of 2 */
        {
         /* 7 */
         0,
         0,
         0},
};

static const kbasep_atom_req core_req_variants_8987[] = {
        {
         /* 0: Fragment variant */
         (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS_8987),
         0},
        {
         /* 1: Compute variant, can use all coregroups */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987),
         (JS_CTX_REQ_ALL_OTHERS_8987),
         0},
        {
         /* 2: Compute variant, uses only coherent coregroups */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS_8987),
         0},
        {
         /* 3: Compute variant, might only use coherent coregroup, and must use tiling */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_T),
         (JS_CTX_REQ_ALL_OTHERS_8987),
         0},

        {
         /* 4: Variant guaranteed to support Compute contexts/atoms
          *
          * In the case of a context that's specified as 'Only Compute', it'll
          * not allow Tiler or Fragment atoms, and so those get rejected */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
         0},

        {
         /* 5: Compute variant for specific-coherent-group, targeting CoreGroup 0.
          * Specifically, this only allows 'Only Compute' contexts/atoms */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
         0                      /* device_nr */
         },
        {
         /* 6: Compute variant for specific-coherent-group, targeting CoreGroup 1.
          * Specifically, this only allows 'Only Compute' contexts/atoms */
         (JS_CORE_REQ_ALL_OTHERS | JS_CORE_REQ_ONLY_COMPUTE_SLOT_8987 | BASE_JD_REQ_COHERENT_GROUP | BASE_JD_REQ_SPECIFIC_COHERENT_GROUP),
         (JS_CTX_REQ_ALL_OTHERS_8987 | KBASE_CTX_FLAG_HINT_ONLY_COMPUTE),
         1                      /* device_nr */
         },
        /* Unused core_req variants, to bring the total up to a power of 2 */
        {
         /* 7 */
         0,
         0,
         0},
};

#define CORE_REQ_VARIANT_FRAGMENT                    0
#define CORE_REQ_VARIANT_COMPUTE_ALL_CORES           1
#define CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP 2
#define CORE_REQ_VARIANT_COMPUTE_OR_TILING           3
#define CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0 5
#define CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1 6

#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987                     4
#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_0 5
#define CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_1 6

#define NUM_CORE_REQ_VARIANTS NELEMS(core_req_variants)
#define NUM_CORE_REQ_VARIANTS_8987 NELEMS(core_req_variants_8987)

/** Mappings between job slot and variant lists for Soft-Stoppable State */
static const u32 variants_supported_ss_state[] = {
        /* js[0] uses Fragment only */
        (1u << CORE_REQ_VARIANT_FRAGMENT),

        /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling,
         * compute-specific-coregroup-0 */
        (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
            | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
            | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING)
            | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0),

        /* js[2] uses: Compute-only-coherent, compute-specific-coregroup-1 */
        (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
            | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1)
};

/** Mappings between job slot and variant lists for Soft-Stoppable State, when
 * we have atoms that can use all the cores (KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES)
 * and there's more than one coregroup */
static const u32 variants_supported_ss_allcore_state[] = {
        /* js[0] uses Fragment only */
        (1u << CORE_REQ_VARIANT_FRAGMENT),

        /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling,
         * compute-specific-coregroup-0, compute-specific-coregroup-1 */
        (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
            | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
            | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING)
            | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_0)
            | (1u << CORE_REQ_VARIANT_COMPUTE_SPECIFIC_COHERENT_1),

        /* js[2] not used */
        0
};

/** Mappings between job slot and variant lists for Soft-Stoppable State for
 * BASE_HW_ISSUE_8987
 *
 * @note There is no 'allcores' variant of this, because this HW issue forces all
 * atoms with BASE_JD_CORE_REQ_SPECIFIC_COHERENT_GROUP to use slot 2 anyway -
 * hence regardless of whether a specific coregroup is targeted, those atoms
 * still make progress. */
static const u32 variants_supported_ss_state_8987[] = {
        /* js[0] uses Fragment only */
        (1u << CORE_REQ_VARIANT_FRAGMENT),

        /* js[1] uses: Compute-all-cores, Compute-only-coherent, Compute-or-Tiling */
        (1u << CORE_REQ_VARIANT_COMPUTE_ALL_CORES)
            | (1u << CORE_REQ_VARIANT_COMPUTE_ONLY_COHERENT_GROUP)
            | (1u << CORE_REQ_VARIANT_COMPUTE_OR_TILING),

        /* js[2] uses: All Only-compute atoms (including those targeting a
         * specific coregroup), and nothing else. This is because their affinity
         * must not intersect with non-only-compute atoms.
         *
         * As a side effect, this causes the 'device_nr' for atoms targeting a
         * specific coregroup to be ignored */
        (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987)
            | (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_0)
            | (1u << CORE_REQ_VARIANT_ONLY_COMPUTE_8987_SPECIFIC_COHERENT_1)
};

/* Defines for easy asserts 'is scheduled'/'is queued'/'is neither queued nor scheduled' */
#define KBASEP_JS_CHECKFLAG_QUEUED       (1u << 0) /**< Check the queued state */
#define KBASEP_JS_CHECKFLAG_SCHEDULED    (1u << 1) /**< Check the scheduled state */
#define KBASEP_JS_CHECKFLAG_IS_QUEUED    (1u << 2) /**< Expect queued state to be set */
#define KBASEP_JS_CHECKFLAG_IS_SCHEDULED (1u << 3) /**< Expect scheduled state to be set */

enum {
        KBASEP_JS_CHECK_NOTQUEUED = KBASEP_JS_CHECKFLAG_QUEUED,
        KBASEP_JS_CHECK_NOTSCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED,
        KBASEP_JS_CHECK_QUEUED = KBASEP_JS_CHECKFLAG_QUEUED | KBASEP_JS_CHECKFLAG_IS_QUEUED,
        KBASEP_JS_CHECK_SCHEDULED = KBASEP_JS_CHECKFLAG_SCHEDULED | KBASEP_JS_CHECKFLAG_IS_SCHEDULED
};

typedef u32 kbasep_js_check;

/*
 * Private Functions
 */

/* Table autogenerated using util built from: midgard/scripts/gen_cfs_weight_of_prio.c */

/* weight = 1.25 */
static const int weight_of_priority[] = {
        /*  -20 */ 11, 14, 18, 23,
        /*  -16 */ 29, 36, 45, 56,
        /*  -12 */ 70, 88, 110, 137,
        /*   -8 */ 171, 214, 268, 335,
        /*   -4 */ 419, 524, 655, 819,
        /*    0 */ 1024, 1280, 1600, 2000,
        /*    4 */ 2500, 3125, 3906, 4883,
        /*    8 */ 6104, 7630, 9538, 11923,
        /*   12 */ 14904, 18630, 23288, 29110,
        /*   16 */ 36388, 45485, 56856, 71070
};

/**
 * @note There is nothing to stop the priority of the ctx containing \a
 * ctx_info changing during or immediately after this function is called
 * (because its jsctx_mutex cannot be held during IRQ). Therefore, this
 * function should only be seen as a heuristic guide as to the priority weight
 * of the context.
 */
STATIC u64 priority_weight(kbasep_js_policy_cfs_ctx *ctx_info, u64 time_us)
{
        u64 time_delta_us;
        int priority;

        priority = ctx_info->process_priority + ctx_info->bag_priority;

        /* Adjust runtime_us using priority weight if required */
        if (priority != 0 && time_us != 0) {
                int clamped_priority;

                /* Clamp values to min..max weights */
                if (priority > PROCESS_PRIORITY_MAX)
                        clamped_priority = PROCESS_PRIORITY_MAX;
                else if (priority < PROCESS_PRIORITY_MIN)
                        clamped_priority = PROCESS_PRIORITY_MIN;
                else
                        clamped_priority = priority;

                /* Fixed point multiplication */
                time_delta_us = (time_us * weight_of_priority[WEIGHT_0_NICE + clamped_priority]);
                /* Remove fraction */
                time_delta_us = time_delta_us >> WEIGHT_FIXEDPOINT_SHIFT;
                /* Make sure the time always increases */
                if (0 == time_delta_us)
                        time_delta_us++;
        } else {
                time_delta_us = time_us;
        }

        return time_delta_us;
}
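
/* Worked example (illustrative sketch only, not part of the driver): at
 * nice +4, priority_weight() uses weight_of_priority[WEIGHT_0_NICE + 4] ==
 * 2500, so 1000us of real runtime is charged as
 * (1000 * 2500) >> WEIGHT_FIXEDPOINT_SHIFT == 2441us; at nice -4 the weight
 * is 419, charging only 409us. Each nice level therefore scales the charged
 * runtime by ~1.25x, pushing low-priority contexts down the queue.
 * Compiled out. */
#if 0
static u64 example_priority_weight_arithmetic(void)
{
        u64 charged_nice_plus4 = (1000ull * 2500) >> WEIGHT_FIXEDPOINT_SHIFT;   /* == 2441 */
        u64 charged_nice_minus4 = (1000ull * 419) >> WEIGHT_FIXEDPOINT_SHIFT;   /* == 409 */

        return charged_nice_plus4 + charged_nice_minus4;
}
#endif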

#if KBASE_TRACE_ENABLE != 0
STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
{
        kbasep_js_device_data *js_devdata;
        int as_nr;
        int refcnt = 0;

        js_devdata = &kbdev->js_data;

        as_nr = kctx->as_nr;
        if (as_nr != KBASEP_AS_NR_INVALID) {
                kbasep_js_per_as_data *js_per_as_data;

                js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];

                refcnt = js_per_as_data->as_busy_refcount;
        }

        return refcnt;
}

STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
{
        unsigned long flags;
        kbasep_js_device_data *js_devdata;
        int refcnt = 0;

        js_devdata = &kbdev->js_data;

        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        refcnt = kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx);
        spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

        return refcnt;
}
#else                           /* KBASE_TRACE_ENABLE != 0 */
STATIC int kbasep_js_policy_trace_get_refcnt_nolock(kbase_device *kbdev, kbase_context *kctx)
{
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(kctx);
        return 0;
}

STATIC INLINE int kbasep_js_policy_trace_get_refcnt(kbase_device *kbdev, kbase_context *kctx)
{
        CSTD_UNUSED(kbdev);
        CSTD_UNUSED(kctx);
        return 0;
}
#endif                          /* KBASE_TRACE_ENABLE != 0 */

#ifdef CONFIG_MALI_DEBUG
STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
{
        /* This function uses the ternary operator and non-explicit comparisons,
         * because it makes for much shorter, easier to read code */

        if (check_flag & KBASEP_JS_CHECKFLAG_QUEUED) {
                mali_bool is_queued;
                mali_bool expect_queued;

                is_queued = (kbasep_list_member_of(&policy_info->ctx_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;

                if (!is_queued)
                        is_queued = (kbasep_list_member_of(&policy_info->ctx_rt_queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;

                expect_queued = (check_flag & KBASEP_JS_CHECKFLAG_IS_QUEUED) ? MALI_TRUE : MALI_FALSE;

                KBASE_DEBUG_ASSERT_MSG(expect_queued == is_queued, "Expected context %p to be %s but it was %s\n", kctx, (expect_queued) ? "queued" : "not queued", (is_queued) ? "queued" : "not queued");
        }

        if (check_flag & KBASEP_JS_CHECKFLAG_SCHEDULED) {
                mali_bool is_scheduled;
                mali_bool expect_scheduled;

                is_scheduled = (kbasep_list_member_of(&policy_info->scheduled_ctxs_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list)) ? MALI_TRUE : MALI_FALSE;

                expect_scheduled = (check_flag & KBASEP_JS_CHECKFLAG_IS_SCHEDULED) ? MALI_TRUE : MALI_FALSE;
                KBASE_DEBUG_ASSERT_MSG(expect_scheduled == is_scheduled, "Expected context %p to be %s but it was %s\n", kctx, (expect_scheduled) ? "scheduled" : "not scheduled", (is_scheduled) ? "scheduled" : "not scheduled");
        }
}
#else                           /* CONFIG_MALI_DEBUG */
STATIC void kbasep_js_debug_check(kbasep_js_policy_cfs *policy_info, kbase_context *kctx, kbasep_js_check check_flag)
{
        CSTD_UNUSED(policy_info);
        CSTD_UNUSED(kctx);
        CSTD_UNUSED(check_flag);
        return;
}
#endif                          /* CONFIG_MALI_DEBUG */

STATIC INLINE void set_slot_to_variant_lookup(u32 *bit_array, u32 slot_idx, u32 variants_supported)
{
        u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
        u32 word_idx = overall_bit_idx / 32;
        u32 bit_idx = overall_bit_idx % 32;

        KBASE_DEBUG_ASSERT(slot_idx < BASE_JM_MAX_NR_SLOTS);
        KBASE_DEBUG_ASSERT((variants_supported & ~LOOKUP_VARIANT_MASK) == 0);

        bit_array[word_idx] |= variants_supported << bit_idx;
}

STATIC INLINE u32 get_slot_to_variant_lookup(u32 *bit_array, u32 slot_idx)
{
        u32 overall_bit_idx = slot_idx * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS;
        u32 word_idx = overall_bit_idx / 32;
        u32 bit_idx = overall_bit_idx % 32;
        u32 res;

        KBASE_DEBUG_ASSERT(slot_idx < BASE_JM_MAX_NR_SLOTS);

        res = bit_array[word_idx] >> bit_idx;
        res &= LOOKUP_VARIANT_MASK;

        return res;
}
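
/* Illustrative sketch (not driver code): assuming
 * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS == 8, each job slot owns an 8-bit field
 * in the packed lookup, so js[1]'s variant mask occupies bits 8..15 of
 * bit_array[0]. Using the js[1] mask from variants_supported_ss_state
 * (variants 1, 2, 3 and 5 -> 0x2E), the round trip looks like this.
 * Compiled out. */
#if 0
static void example_slot_to_variant_roundtrip(void)
{
        u32 bit_array[(BASE_JM_MAX_NR_SLOTS * KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS + 31) / 32] = { 0 };

        set_slot_to_variant_lookup(bit_array, 1, 0x2Eu);        /* stored at bits 8..15 */

        /* reads back (bit_array[0] >> 8) & LOOKUP_VARIANT_MASK == 0x2E */
        KBASE_DEBUG_ASSERT(get_slot_to_variant_lookup(bit_array, 1) == 0x2Eu);
}
#endif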

/* Check the core_req_variants: make sure that every job slot is satisfied by
 * one of the variants. This checks that cached_variant_idx_init will produce a
 * valid result for jobs that make maximum use of the job slots.
 *
 * @note The checks are limited to the job slots - this does not check that
 * every context requirement is covered (because some are intentionally not
 * supported, such as KBASE_CTX_FLAG_SUBMIT_DISABLED) */
#ifdef CONFIG_MALI_DEBUG
STATIC void debug_check_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
{
        kbasep_js_device_data *js_devdata;
        u32 i;
        int j;

        js_devdata = &kbdev->js_data;

        for (j = 0; j < kbdev->gpu_props.num_job_slots; ++j) {
                base_jd_core_req job_core_req;
                mali_bool found = MALI_FALSE;

                job_core_req = js_devdata->js_reqs[j];
                for (i = 0; i < policy_info->num_core_req_variants; ++i) {
                        base_jd_core_req var_core_req;

                        var_core_req = policy_info->core_req_variants[i].core_req;

                        if ((var_core_req & job_core_req) == job_core_req) {
                                found = MALI_TRUE;
                                break;
                        }
                }

                /* Early-out on any failure */
                KBASE_DEBUG_ASSERT_MSG(found != MALI_FALSE, "Job slot %d features 0x%x not matched by core_req_variants. Rework core_req_variants and variants_supported_<...>_state[] to match\n", j, job_core_req);
        }
}
#endif

STATIC void build_core_req_variants(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
{
        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(policy_info != NULL);

        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987)) {
                KBASE_DEBUG_ASSERT(NUM_CORE_REQ_VARIANTS_8987 <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS);

                /* Assume a static set of variants */
                memcpy(policy_info->core_req_variants, core_req_variants_8987, sizeof(core_req_variants_8987));

                policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS_8987;
        } else {
                KBASE_DEBUG_ASSERT(NUM_CORE_REQ_VARIANTS <= KBASEP_JS_MAX_NR_CORE_REQ_VARIANTS);

                /* Assume a static set of variants */
                memcpy(policy_info->core_req_variants, core_req_variants, sizeof(core_req_variants));

                policy_info->num_core_req_variants = NUM_CORE_REQ_VARIANTS;
        }

        KBASE_DEBUG_CODE(debug_check_core_req_variants(kbdev, policy_info));
}

STATIC void build_slot_lookups(kbase_device *kbdev, kbasep_js_policy_cfs *policy_info)
{
        u8 i;
        const u32 *variants_supported_ss_for_this_hw = variants_supported_ss_state;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(policy_info != NULL);

        KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_state));
        KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_allcore_state));
        KBASE_DEBUG_ASSERT(kbdev->gpu_props.num_job_slots <= NELEMS(variants_supported_ss_state_8987));

        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_8987))
                variants_supported_ss_for_this_hw = variants_supported_ss_state_8987;

        /* Given the static set of variants, provide a static set of lookups */
        for (i = 0; i < kbdev->gpu_props.num_job_slots; ++i) {
                set_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_state, i, variants_supported_ss_for_this_hw[i]);

                set_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_allcore_state, i, variants_supported_ss_allcore_state[i]);
        }
}

STATIC mali_error cached_variant_idx_init(const kbasep_js_policy_cfs *policy_info, const kbase_context *kctx, kbase_jd_atom *atom)
{
        kbasep_js_policy_cfs_job *job_info;
        u32 i;
        base_jd_core_req job_core_req;
        u32 job_device_nr;
        kbase_context_flags ctx_flags;
        const kbasep_js_kctx_info *js_kctx_info;
        const kbase_device *kbdev;

        KBASE_DEBUG_ASSERT(policy_info != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);
        KBASE_DEBUG_ASSERT(atom != NULL);

        kbdev = container_of(policy_info, const kbase_device, js_data.policy.cfs);
        job_info = &atom->sched_info.cfs;
        job_core_req = atom->core_req;
        job_device_nr = atom->device_nr;
        js_kctx_info = &kctx->jctx.sched_info;
        ctx_flags = js_kctx_info->ctx.flags;

        /* Initial check for atoms targeting a specific coregroup */
        if ((job_core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) != MALI_FALSE && job_device_nr >= kbdev->gpu_props.num_core_groups) {
                /* device_nr exceeds the number of coregroups - not allowed by
                 * @ref base_jd_atom API contract */
                return MALI_ERROR_FUNCTION_FAILED;
        }

        /* Pick a core_req variant that matches us. Since they're ordered least
         * restrictive first, this picks the least restrictive variant */
        for (i = 0; i < policy_info->num_core_req_variants; ++i) {
                base_jd_core_req var_core_req;
                kbase_context_flags var_ctx_req;
                u32 var_device_nr;

                var_core_req = policy_info->core_req_variants[i].core_req;
                var_ctx_req = policy_info->core_req_variants[i].ctx_req;
                var_device_nr = policy_info->core_req_variants[i].device_nr;

                if ((var_core_req & job_core_req) == job_core_req &&
                    (var_ctx_req & ctx_flags) == ctx_flags &&
                    ((var_core_req & BASE_JD_REQ_SPECIFIC_COHERENT_GROUP) == MALI_FALSE || var_device_nr == job_device_nr)) {
                        job_info->cached_variant_idx = i;
                        return MALI_ERROR_NONE;
                }
        }

        /* Could not find a matching requirement; this should only be caused by an
         * attempt to attack the driver. */
        return MALI_ERROR_FUNCTION_FAILED;
}
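
/* Illustrative sketch (not driver code) of the matching rule above: a variant
 * matches when its core_req is a superset of the atom's requirements and its
 * ctx_req is a superset of the context's flags. For example, a plain fragment
 * atom (core_req == BASE_JD_REQ_FS) is a subset of variant 0
 * (JS_CORE_REQ_ALL_OTHERS | BASE_JD_REQ_FS | ...), so the loop stops there;
 * the least-restrictive-first ordering makes the first hit the best hit.
 * Compiled out. */
#if 0
static mali_bool example_variant_matches(base_jd_core_req var_core_req, base_jd_core_req job_core_req)
{
        /* superset test: every requirement bit of the job is present in the variant */
        return ((var_core_req & job_core_req) == job_core_req) ? MALI_TRUE : MALI_FALSE;
}
#endif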

STATIC mali_bool dequeue_job(kbase_device *kbdev,
                             kbase_context *kctx,
                             u32 variants_supported,
                             kbase_jd_atom ** const katom_ptr,
                             int job_slot_idx)
{
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_policy_cfs_ctx *ctx_info;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom_ptr != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        js_devdata = &kbdev->js_data;
        policy_info = &js_devdata->policy.cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        /* Only submit jobs from contexts that are allowed */
        if (kbasep_js_is_submit_allowed(js_devdata, kctx) != MALI_FALSE) {
                /* Check each variant in turn */
                while (variants_supported != 0) {
                        long variant_idx;
                        struct list_head *job_list;

                        variant_idx = ffs(variants_supported) - 1;
                        job_list = &ctx_info->job_list_head[variant_idx];

                        if (!list_empty(job_list)) {
                                /* Found a context with a matching job */
                                kbase_jd_atom *front_atom = list_entry(job_list->next, kbase_jd_atom, sched_info.cfs.list);

                                KBASE_TRACE_ADD_SLOT(kbdev, JS_POLICY_DEQUEUE_JOB, front_atom->kctx, front_atom, front_atom->jc, job_slot_idx);

                                *katom_ptr = front_atom;
                                list_del(job_list->next);

                                (*katom_ptr)->sched_info.cfs.ticks = 0;

                                /* Put this context at the back of the Run Pool */
                                list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
                                list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &policy_info->scheduled_ctxs_head);

                                return MALI_TRUE;
                        }

                        variants_supported &= ~(1u << variant_idx);
                }
                /* All variants checked by here */
        }

        /* The context does not have a matching job */
        return MALI_FALSE;
}
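
/* Illustrative sketch (not driver code): how the ffs() loop above walks a
 * variants_supported mask in ascending variant order. For the js[1] mask 0x2E
 * it visits variants 1, 2, 3, then 5, clearing each bit after the visit.
 * Compiled out. */
#if 0
static void example_variant_scan(u32 variants_supported)
{
        while (variants_supported != 0) {
                long variant_idx = ffs(variants_supported) - 1;

                /* ...inspect job_list_head[variant_idx] here... */

                variants_supported &= ~(1u << variant_idx);
        }
}
#endif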

/**
 * The caller must hold the runpool_irq spinlock when calling this function.
 */
STATIC INLINE mali_bool timer_callback_should_run(kbase_device *kbdev)
{
        kbasep_js_device_data *js_devdata;
        s8 nr_running_ctxs;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;

        /* nr_user_contexts_running is updated with the runpool_mutex. However, the
         * locking in the caller gives us a barrier that ensures nr_user_contexts is
         * up-to-date for reading */
        nr_running_ctxs = js_devdata->nr_user_contexts_running;

#ifdef CONFIG_MALI_DEBUG
        if (js_devdata->softstop_always && nr_running_ctxs > 0) {
                /* Debug support for allowing soft-stop on a single context */
                return MALI_TRUE;
        }
#endif                          /* CONFIG_MALI_DEBUG */

        if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
                /* Timeouts would have to be 4x longer (due to micro-architectural design)
                 * to support OpenCL conformance tests, so only run the timer when there's:
                 * - 2 or more CL contexts
                 * - 1 or more GLES contexts
                 *
                 * NOTE: a context that has both Compute and Non-Compute jobs is
                 * treated as an OpenCL context (hence, we don't check
                 * KBASEP_JS_CTX_ATTR_NON_COMPUTE).
                 */
                {
                        s8 nr_compute_ctxs = kbasep_js_ctx_attr_count_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE);
                        s8 nr_noncompute_ctxs = nr_running_ctxs - nr_compute_ctxs;

                        return (mali_bool) (nr_compute_ctxs >= 2 || nr_noncompute_ctxs > 0);
                }
        } else {
                /* Run the timer callback whenever there is at least 1 context */
                return (mali_bool) (nr_running_ctxs > 0);
        }
}

static enum hrtimer_restart timer_callback(struct hrtimer *timer)
{
        unsigned long flags;
        kbase_device *kbdev;
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;
        int s;
        mali_bool reset_needed = MALI_FALSE;

        KBASE_DEBUG_ASSERT(timer != NULL);

        policy_info = container_of(timer, kbasep_js_policy_cfs, scheduling_timer);
        kbdev = container_of(policy_info, kbase_device, js_data.policy.cfs);
        js_devdata = &kbdev->js_data;

        /* Loop through the slots */
        spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
        for (s = 0; s < kbdev->gpu_props.num_job_slots; s++) {
                kbase_jm_slot *slot = &kbdev->jm_slots[s];
                kbase_jd_atom *atom = NULL;

                if (kbasep_jm_nr_jobs_submitted(slot) > 0) {
                        atom = kbasep_jm_peek_idx_submit_slot(slot, 0);
                        KBASE_DEBUG_ASSERT(atom != NULL);

                        if (kbasep_jm_is_dummy_workaround_job(kbdev, atom) != MALI_FALSE) {
                                /* Prevent further use of the atom - never cause a soft-stop, hard-stop, or a GPU reset due to it. */
                                atom = NULL;
                        }
                }

                if (atom != NULL) {
                        /* The current version of the model doesn't support Soft-Stop */
                        if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
                                u32 ticks = atom->sched_info.cfs.ticks++;

#if !CINSTR_DUMPING_ENABLED
                                u32 soft_stop_ticks, hard_stop_ticks, gpu_reset_ticks;

                                if (atom->core_req & BASE_JD_REQ_ONLY_COMPUTE) {
                                        soft_stop_ticks = js_devdata->soft_stop_ticks_cl;
                                        hard_stop_ticks = js_devdata->hard_stop_ticks_cl;
                                        gpu_reset_ticks = js_devdata->gpu_reset_ticks_cl;
                                } else {
                                        soft_stop_ticks = js_devdata->soft_stop_ticks;
                                        hard_stop_ticks = js_devdata->hard_stop_ticks_ss;
                                        gpu_reset_ticks = js_devdata->gpu_reset_ticks_ss;
                                }

                                /* Job is Soft-Stoppable */
                                if (ticks == soft_stop_ticks) {
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks ticks.
                                         * Soft stop the slot so we can run other jobs.
                                         */
                                        dev_dbg(kbdev->dev, "Soft-stop");

#if KBASE_DISABLE_SCHEDULING_SOFT_STOPS == 0
                                        kbase_job_slot_softstop(kbdev, s, atom);
#endif
                                } else if (ticks == hard_stop_ticks) {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_ss ticks.
                                         * It should have been soft-stopped by now. Hard stop the slot.
                                         */
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
                                        dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
#endif
                                } else if (ticks == gpu_reset_ticks) {
                                        /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_ss ticks.
                                         * It should have left the GPU by now. Signal that the GPU needs to be reset.
                                         */
                                        reset_needed = MALI_TRUE;
                                }
#else                           /* !CINSTR_DUMPING_ENABLED */
                                /* NOTE: When CINSTR_DUMPING_ENABLED, we use the alternate timeouts, which
                                 * make the hard-stop and GPU reset timeout much longer. We also ensure that
                                 * we don't soft-stop at all. */
                                if (ticks == js_devdata->soft_stop_ticks) {
                                        /* Job has been scheduled for at least js_devdata->soft_stop_ticks.
                                         * We do not soft-stop during CINSTR_DUMPING_ENABLED, however.
                                         */
                                        dev_dbg(kbdev->dev, "Soft-stop");
                                } else if (ticks == js_devdata->hard_stop_ticks_nss) {
                                        /* Job has been scheduled for at least js_devdata->hard_stop_ticks_nss ticks.
                                         * Hard stop the slot.
                                         */
#if KBASE_DISABLE_SCHEDULING_HARD_STOPS == 0
                                        dev_warn(kbdev->dev, "JS: Job Hard-Stopped (took more than %lu ticks at %lu ms/tick)", (unsigned long)ticks, (unsigned long)(js_devdata->scheduling_tick_ns / 1000000u));
                                        kbase_job_slot_hardstop(atom->kctx, s, atom);
#endif
                                } else if (ticks == js_devdata->gpu_reset_ticks_nss) {
                                        /* Job has been scheduled for at least js_devdata->gpu_reset_ticks_nss ticks.
                                         * It should have left the GPU by now. Signal that the GPU needs to be reset.
                                         */
                                        reset_needed = MALI_TRUE;
                                }
#endif                          /* !CINSTR_DUMPING_ENABLED */
                        }
                }
        }

        if (reset_needed) {
                dev_err(kbdev->dev, "JS: Job has been on the GPU for too long (KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS/NSS timeout hit). Issuing GPU soft-reset to resolve.");

                if (kbase_prepare_to_reset_gpu_locked(kbdev))
                        kbase_reset_gpu_locked(kbdev);
        }

        /* The timer is re-issued if there are contexts in the run-pool */
        if (timer_callback_should_run(kbdev) != MALI_FALSE) {
                hrtimer_start(&policy_info->scheduling_timer, HR_TIMER_DELAY_NSEC(js_devdata->scheduling_tick_ns), HRTIMER_MODE_REL);
        } else {
                KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_END, NULL, NULL, 0u, 0u);
                /* timer_running state is updated by kbasep_js_policy_runpool_timers_sync() */
        }

        spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);

        return HRTIMER_NORESTART;
}
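
/* Illustrative timeline (the tick values are assumptions chosen for the
 * example; in practice they come from the KBASE_CONFIG_ATTR_JS_* platform
 * attributes): with a 100ms scheduling tick, soft_stop_ticks == 1,
 * hard_stop_ticks_ss == 50 and gpu_reset_ticks_ss == 55, a non-CL atom is
 * soft-stopped after ~100ms on the GPU, hard-stopped after ~5s, and triggers
 * a GPU reset after ~5.5s. Compiled out. */
#if 0
static void example_tick_ladder(void)
{
        u32 tick_ms = 100;                      /* assumed scheduling_tick_ns / 1000000 */
        u32 soft_stop_ms = 1 * tick_ms;         /* ~100ms: soft-stop */
        u32 hard_stop_ms = 50 * tick_ms;        /* ~5000ms: hard-stop */
        u32 gpu_reset_ms = 55 * tick_ms;        /* ~5500ms: GPU reset */

        (void)soft_stop_ms;
        (void)hard_stop_ms;
        (void)gpu_reset_ms;
}
#endif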

/*
 * Non-private functions
 */

mali_error kbasep_js_policy_init(kbase_device *kbdev)
{
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        js_devdata = &kbdev->js_data;
        policy_info = &js_devdata->policy.cfs;

        INIT_LIST_HEAD(&policy_info->ctx_queue_head);
        INIT_LIST_HEAD(&policy_info->scheduled_ctxs_head);
        INIT_LIST_HEAD(&policy_info->ctx_rt_queue_head);

        atomic64_set(&policy_info->least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
        atomic64_set(&policy_info->rt_least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);

        hrtimer_init(&policy_info->scheduling_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        policy_info->scheduling_timer.function = timer_callback;

        policy_info->timer_running = MALI_FALSE;
        policy_info->head_runtime_us = 0;

        /* Build up the core_req variants */
        build_core_req_variants(kbdev, policy_info);
        /* Build the slot to variant lookups */
        build_slot_lookups(kbdev, policy_info);

        return MALI_ERROR_NONE;
}

void kbasep_js_policy_term(kbasep_js_policy *js_policy)
{
        kbasep_js_policy_cfs *policy_info;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        policy_info = &js_policy->cfs;

        /* ASSERT that there are no contexts queued */
        KBASE_DEBUG_ASSERT(list_empty(&policy_info->ctx_queue_head));
        KBASE_DEBUG_ASSERT(KBASEP_JS_RUNTIME_EMPTY == atomic64_read(&policy_info->least_runtime_us));

        /* ASSERT that there are no contexts scheduled */
        KBASE_DEBUG_ASSERT(list_empty(&policy_info->scheduled_ctxs_head));

        /* ASSERT that there are no contexts queued on the realtime queue */
        KBASE_DEBUG_ASSERT(list_empty(&policy_info->ctx_rt_queue_head));
        KBASE_DEBUG_ASSERT(KBASEP_JS_RUNTIME_EMPTY == atomic64_read(&policy_info->rt_least_runtime_us));

        hrtimer_cancel(&policy_info->scheduling_timer);
}

mali_error kbasep_js_policy_init_ctx(kbase_device *kbdev, kbase_context *kctx)
{
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbasep_js_policy_cfs *policy_info;
        u32 i;
        int policy;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        js_devdata = &kbdev->js_data;
        policy_info = &kbdev->js_data.policy.cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_INIT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));

        for (i = 0; i < policy_info->num_core_req_variants; ++i)
                INIT_LIST_HEAD(&ctx_info->job_list_head[i]);

        policy = current->policy;
        if (policy == SCHED_FIFO || policy == SCHED_RR) {
                ctx_info->process_rt_policy = MALI_TRUE;
                ctx_info->process_priority = (((MAX_RT_PRIO - 1) - current->rt_priority) / 5) - 20;
        } else {
                ctx_info->process_rt_policy = MALI_FALSE;
                ctx_info->process_priority = (current->static_prio - MAX_RT_PRIO) - 20;
        }

        ctx_info->bag_total_priority = 0;
        ctx_info->bag_total_nr_atoms = 0;

        /* Initial runtime (relative to least-run context runtime)
         *
         * This uses the Policy Queue's most up-to-date head_runtime_us by using the
         * queue mutex to issue memory barriers - also ensure future updates to
         * head_runtime_us occur strictly after this context is initialized */
        mutex_lock(&js_devdata->queue_mutex);

        /* No need to hold the runpool_irq.lock here, because we're initializing
         * the value, and the context is definitely not being updated in the
         * runpool at this point. The queue_mutex ensures the memory barrier. */
        ctx_info->runtime_us = policy_info->head_runtime_us + priority_weight(ctx_info, (u64) js_devdata->cfs_ctx_runtime_init_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u));

        mutex_unlock(&js_devdata->queue_mutex);

        return MALI_ERROR_NONE;
}
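
/* Worked example (illustrative; assumes MAX_RT_PRIO == 100): a SCHED_NORMAL
 * task at nice 0 has static_prio 120, giving (120 - 100) - 20 == 0. A
 * SCHED_FIFO task at rt_priority 99 maps to ((99 - 99) / 5) - 20 == -20 (the
 * strongest weight), and rt_priority 1 maps to (98 / 5) - 20 == -1, so all RT
 * tasks land in the -20..-1 range, above every normal task. Compiled out. */
#if 0
static void example_process_priority_mapping(void)
{
        int nice0_prio = (120 - MAX_RT_PRIO) - 20;              /* == 0 */
        int fifo99_prio = (((MAX_RT_PRIO - 1) - 99) / 5) - 20;  /* == -20 */
        int fifo1_prio = (((MAX_RT_PRIO - 1) - 1) / 5) - 20;    /* == -1 */

        (void)nice0_prio;
        (void)fifo99_prio;
        (void)fifo1_prio;
}
#endif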

void kbasep_js_policy_term_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbasep_js_policy_cfs *policy_info;
        u32 i;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        {
                kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);

                KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_TERM_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));
        }

        /* ASSERT that no jobs are present */
        for (i = 0; i < policy_info->num_core_req_variants; ++i)
                KBASE_DEBUG_ASSERT(list_empty(&ctx_info->job_list_head[i]));

        /* No work to do */
}

/*
 * Context Management
 */

void kbasep_js_policy_enqueue_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbase_context *head_ctx;
        kbase_context *list_kctx = NULL;
        kbasep_js_device_data *js_devdata;
        struct list_head *queue_head;
        struct list_head *pos;
        kbase_device *kbdev;
        atomic64_t *least_runtime_us;
        u64 head_runtime;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
        kbdev = container_of(js_policy, kbase_device, js_data.policy);
        js_devdata = &kbdev->js_data;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_ENQUEUE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));

        /* ASSERT about scheduled-ness/queued-ness */
        kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_NOTQUEUED);

        /* Clamp the runtime to prevent DoS attacks through "stored-up" runtime */
        if (policy_info->head_runtime_us > ctx_info->runtime_us + (u64) js_devdata->cfs_ctx_runtime_min_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u)) {
                /* No need to hold the runpool_irq.lock here, because we're essentially
                 * initializing the value, and the context is definitely not being updated in the
                 * runpool at this point. The queue_mutex held by the caller ensures the memory
                 * barrier. */
                ctx_info->runtime_us = policy_info->head_runtime_us - (u64) js_devdata->cfs_ctx_runtime_min_slices * (u64) (js_devdata->ctx_timeslice_ns / 1000u);
        }

        /* Find the position where the context should be enqueued */
        if (ctx_info->process_rt_policy) {
                queue_head = &policy_info->ctx_rt_queue_head;
                least_runtime_us = &policy_info->rt_least_runtime_us;
        } else {
                queue_head = &policy_info->ctx_queue_head;
                least_runtime_us = &policy_info->least_runtime_us;
        }

        if (list_empty(queue_head)) {
                list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, queue_head);
        } else {
                list_for_each(pos, queue_head) {
                        kbasep_js_policy_cfs_ctx *list_ctx_info;

                        list_kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                        list_ctx_info = &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs;

                        if ((kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0)
                                break;

                        if ((list_ctx_info->runtime_us > ctx_info->runtime_us) && ((list_kctx->jctx.sched_info.ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) == 0))
                                break;
                }

                /* Add the context to the queue */
                list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &list_kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
        }

        /* Ensure least_runtime_us is up to date */
        head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
        head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;
        atomic64_set(least_runtime_us, head_runtime);
}
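
/* Worked example (illustrative; the slice count and timeslice are
 * assumptions, configured per-platform): with head_runtime_us == 10,000,000,
 * a 50ms context timeslice (50,000us) and cfs_ctx_runtime_min_slices == 2, a
 * context returning with a stale runtime_us of 0 is clamped up to
 * 10,000,000 - 2 * 50,000 == 9,900,000us. It therefore re-enters the queue at
 * most two timeslices ahead of the head instead of monopolising the GPU with
 * "stored-up" runtime. Compiled out. */
#if 0
static void example_runtime_clamp(void)
{
        u64 head_runtime_us = 10000000ull;      /* assumed current queue-head runtime */
        u64 timeslice_us = 50000ull;            /* assumed ctx_timeslice_ns / 1000 */
        u64 min_slices = 2;                     /* assumed cfs_ctx_runtime_min_slices */
        u64 clamped = head_runtime_us - min_slices * timeslice_us;      /* == 9900000 */

        (void)clamped;
}
#endif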

mali_bool kbasep_js_policy_dequeue_head_ctx(kbasep_js_policy *js_policy, kbase_context ** const kctx_ptr)
{
        kbasep_js_policy_cfs *policy_info;
        kbase_context *head_ctx;
        struct list_head *queue_head;
        atomic64_t *least_runtime_us;
        kbase_device *kbdev;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx_ptr != NULL);

        policy_info = &js_policy->cfs;
        kbdev = container_of(js_policy, kbase_device, js_data.policy);

        /* Attempt to dequeue from the 'realtime' queue first */
        if (list_empty(&policy_info->ctx_rt_queue_head)) {
                if (list_empty(&policy_info->ctx_queue_head)) {
                        /* Nothing to dequeue */
                        return MALI_FALSE;
                } else {
                        queue_head = &policy_info->ctx_queue_head;
                        least_runtime_us = &policy_info->least_runtime_us;
                }
        } else {
                queue_head = &policy_info->ctx_rt_queue_head;
                least_runtime_us = &policy_info->rt_least_runtime_us;
        }

        /* Contexts are dequeued from the front of the queue */
        *kctx_ptr = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
        /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
        if (queue_head->next->next == queue_head)
                atomic64_set(least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);
        list_del(queue_head->next);

        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_DEQUEUE_HEAD_CTX, *kctx_ptr, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, *kctx_ptr));

        /* Update the head runtime */
        if (!list_empty(queue_head)) {
                u64 head_runtime;

                head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);

                /* No need to hold the runpool_irq.lock here for reading - the
                 * context is definitely not being updated in the runpool at this
                 * point. The queue_mutex held by the caller ensures the memory barrier. */
                head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;

                if (head_runtime > policy_info->head_runtime_us)
                        policy_info->head_runtime_us = head_runtime;

                atomic64_set(least_runtime_us, head_runtime);
        }

        return MALI_TRUE;
}

mali_bool kbasep_js_policy_try_evict_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbasep_js_policy_cfs *policy_info;
        mali_bool is_present;
        struct list_head *queue_head;
        atomic64_t *least_runtime_us;
        kbase_device *kbdev;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
        kbdev = container_of(js_policy, kbase_device, js_data.policy);

        if (ctx_info->process_rt_policy) {
                queue_head = &policy_info->ctx_rt_queue_head;
                least_runtime_us = &policy_info->rt_least_runtime_us;
        } else {
                queue_head = &policy_info->ctx_queue_head;
                least_runtime_us = &policy_info->least_runtime_us;
        }

        is_present = kbasep_list_member_of(queue_head, &kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);

        KBASE_TRACE_ADD_REFCOUNT_INFO(kbdev, JS_POLICY_TRY_EVICT_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx), is_present);

        if (is_present != MALI_FALSE) {
                kbase_context *head_ctx;

                /* If dequeuing will empty the list, then set least_runtime_us prior to deletion */
                if (queue_head->next->next == queue_head)
                        atomic64_set(least_runtime_us, KBASEP_JS_RUNTIME_EMPTY);

                /* Remove the context */
                list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);

                /* Update the head runtime */
                if (!list_empty(queue_head)) {
                        u64 head_runtime;

                        head_ctx = list_entry(queue_head->next, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);

                        /* No need to hold the runpool_irq.lock here for reading - the
                         * context is definitely not being updated in the runpool at this
                         * point. The queue_mutex held by the caller ensures the memory barrier. */
                        head_runtime = head_ctx->jctx.sched_info.runpool.policy_ctx.cfs.runtime_us;

                        if (head_runtime > policy_info->head_runtime_us)
                                policy_info->head_runtime_us = head_runtime;

                        atomic64_set(least_runtime_us, head_runtime);
                }
        }

        return is_present;
}

void kbasep_js_policy_foreach_ctx_job(kbasep_js_policy *js_policy, kbase_context *kctx,
        kbasep_js_policy_ctx_job_cb callback, mali_bool detach_jobs)
{
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbase_device *kbdev;
        u32 i;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        kbdev = container_of(js_policy, kbase_device, js_data.policy);
        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_FOREACH_CTX_JOBS, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt(kbdev, kctx));

        /* Invoke callback on jobs on each variant in turn */
        for (i = 0; i < policy_info->num_core_req_variants; ++i) {
                struct list_head *job_list;
                struct kbase_jd_atom *atom;
                struct kbase_jd_atom *tmp_iter;

                job_list = &ctx_info->job_list_head[i];

                /* Invoke callback on all kbase_jd_atoms in this list, optionally
                 * removing them from the list */
                list_for_each_entry_safe(atom, tmp_iter, job_list, sched_info.cfs.list) {
                        if (detach_jobs)
                                list_del(&atom->sched_info.cfs.list);
                        callback(kbdev, atom);
                }
        }
}

void kbasep_js_policy_runpool_add_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_device_data *js_devdata;
        kbase_device *kbdev;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;
        js_devdata = container_of(js_policy, kbasep_js_device_data, policy);

        kbdev = kctx->kbdev;

        KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_RUNPOOL_ADD_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx));

        /* ASSERT about scheduled-ness/queued-ness */
        kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_NOTSCHEDULED);

        /* All enqueued contexts go to the back of the runpool */
        list_add_tail(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list, &policy_info->scheduled_ctxs_head);

        if (timer_callback_should_run(kbdev) != MALI_FALSE && policy_info->timer_running == MALI_FALSE) {
                hrtimer_start(&policy_info->scheduling_timer, HR_TIMER_DELAY_NSEC(js_devdata->scheduling_tick_ns), HRTIMER_MODE_REL);

                KBASE_TRACE_ADD(kbdev, JS_POLICY_TIMER_START, NULL, NULL, 0u, 0u);
                policy_info->timer_running = MALI_TRUE;
        }
}

void kbasep_js_policy_runpool_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs *policy_info;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;

        {
                kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);

                KBASE_TRACE_ADD_REFCOUNT(kbdev, JS_POLICY_RUNPOOL_REMOVE_CTX, kctx, NULL, 0u, kbasep_js_policy_trace_get_refcnt_nolock(kbdev, kctx));
        }

        /* ASSERT about scheduled-ness/queued-ness */
        kbasep_js_debug_check(policy_info, kctx, KBASEP_JS_CHECK_SCHEDULED);

        /* No searching or significant list maintenance required to remove this context */
        list_del(&kctx->jctx.sched_info.runpool.policy_ctx.cfs.list);
}

mali_bool kbasep_js_policy_should_remove_ctx(kbasep_js_policy *js_policy, kbase_context *kctx)
{
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbasep_js_policy_cfs *policy_info;
        kbasep_js_device_data *js_devdata;
        u64 least_runtime_us;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;
        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;
        js_devdata = container_of(js_policy, kbasep_js_device_data, policy);

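        /* Contexts from realtime processes are queued separately from normal
         * ones, so a context is only compared against the least runtime of its
         * own class. */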
        if (ctx_info->process_rt_policy)
                least_runtime_us = atomic64_read(&policy_info->rt_least_runtime_us);
        else
                least_runtime_us = atomic64_read(&policy_info->least_runtime_us);

        if (KBASEP_JS_RUNTIME_EMPTY == least_runtime_us) {
                /* Queue is empty */
                return MALI_FALSE;
        }

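        /* Sketch of the eviction rule, with illustrative numbers: the
         * timeslice is weighted by the same priority factor that
         * kbasep_js_policy_log_job_result() applies when charging runtime, so
         * each context gets roughly one real timeslice of grace beyond the
         * head runtime. E.g. with a head runtime of 100000us, a 50ms
         * (50000us) timeslice and a weight of 1.0, the context is only
         * scheduled out once its charged runtime exceeds 150000us. */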
        if ((least_runtime_us + priority_weight(ctx_info, (u64) (js_devdata->ctx_timeslice_ns / 1000u)))
            < ctx_info->runtime_us) {
                /* The context is scheduled out once it is no longer the
                 * least-run context. The "real" head runtime is used instead
                 * of the cached runtime, so that the current context is not
                 * scheduled out when there are fewer contexts than address
                 * spaces.
                 */
                return MALI_TRUE;
        }

        return MALI_FALSE;
}

void kbasep_js_policy_runpool_timers_sync(kbasep_js_policy *js_policy)
{
        kbasep_js_policy_cfs *policy_info;
        kbase_device *kbdev;
        kbasep_js_device_data *js_devdata;

        KBASE_DEBUG_ASSERT(js_policy != NULL);

        policy_info = &js_policy->cfs;
        kbdev = container_of(js_policy, kbase_device, js_data.policy);
        js_devdata = &kbdev->js_data;

        if (!timer_callback_should_run(kbdev)) {
                unsigned long flags;

                /* If the timer is running now, synchronize with it by
                 * locking/unlocking its spinlock, to ensure it's not using an
                 * old value from timer_callback_should_run() */
                spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
                spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
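
                /* Note: taking and immediately dropping the lock is a
                 * synchronization point, not a data access: once we have held
                 * the spinlock, any timer callback that was mid-flight has
                 * left its critical section, so the hrtimer_cancel() below
                 * cannot race with a requeue decision based on stale state. */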

                /* From now on, the return value of
                 * timer_callback_should_run() will also cause the timer not to
                 * requeue itself. That return value cannot change, because it
                 * depends on variables updated with the runpool_mutex held,
                 * which the caller of this function must also hold */
                hrtimer_cancel(&policy_info->scheduling_timer);

                policy_info->timer_running = MALI_FALSE;
        }
}

/*
 * Job Chain Management
 */

mali_error kbasep_js_policy_init_job(const kbasep_js_policy *js_policy, const kbase_context *kctx, kbase_jd_atom *katom)
{
        const kbasep_js_policy_cfs *policy_info;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        policy_info = &js_policy->cfs;

        /* Determine the job's index into the job_list_head array; an error is
         * returned if the atom is malformed, so that the failure is reported. */
        return cached_variant_idx_init(policy_info, kctx, katom);
}

void kbasep_js_policy_register_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
{
        kbasep_js_policy_cfs_ctx *ctx_info;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        /* Adjust context priority to include the new job */
        ctx_info->bag_total_nr_atoms++;
        ctx_info->bag_total_priority += katom->nice_prio;

        /* Get average priority and convert to NICE range -20..19 */
        if (ctx_info->bag_total_nr_atoms)
                ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
}
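
/* Worked example (illustrative values only): katom->nice_prio is held in the
 * 0..39 range, so a context holding two atoms with nice_prio 10 and 30 has
 * bag_total_priority == 40 and bag_total_nr_atoms == 2, giving bag_priority ==
 * (40 / 2) - 20 == 0, i.e. the average mapped back onto the usual nice range
 * of -20..19. */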

void kbasep_js_policy_deregister_job(kbasep_js_policy *js_policy, kbase_context *kctx, kbase_jd_atom *katom)
{
        kbasep_js_policy_cfs_ctx *ctx_info;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        CSTD_UNUSED(js_policy);
        KBASE_DEBUG_ASSERT(katom != NULL);
        KBASE_DEBUG_ASSERT(kctx != NULL);

        ctx_info = &kctx->jctx.sched_info.runpool.policy_ctx.cfs;

        /* Adjust context priority to no longer include the removed job */
        KBASE_DEBUG_ASSERT(ctx_info->bag_total_nr_atoms > 0);
        ctx_info->bag_total_nr_atoms--;
        ctx_info->bag_total_priority -= katom->nice_prio;
        KBASE_DEBUG_ASSERT(ctx_info->bag_total_priority >= 0);

        /* Get average priority and convert to NICE range -20..19 */
        if (ctx_info->bag_total_nr_atoms)
                ctx_info->bag_priority = (ctx_info->bag_total_priority / ctx_info->bag_total_nr_atoms) - 20;
}
KBASE_EXPORT_TEST_API(kbasep_js_policy_deregister_job)

mali_bool kbasep_js_policy_dequeue_job(kbase_device *kbdev,
                                       int job_slot_idx,
                                       kbase_jd_atom ** const katom_ptr)
{
        kbasep_js_device_data *js_devdata;
        kbasep_js_policy_cfs *policy_info;
        kbase_context *kctx;
        u32 variants_supported;
        struct list_head *pos;

        KBASE_DEBUG_ASSERT(kbdev != NULL);
        KBASE_DEBUG_ASSERT(katom_ptr != NULL);
        KBASE_DEBUG_ASSERT(job_slot_idx < BASE_JM_MAX_NR_SLOTS);

        js_devdata = &kbdev->js_data;
        policy_info = &js_devdata->policy.cfs;

        /* Get the variants for this slot */
        if (kbdev->gpu_props.num_core_groups > 1 && kbasep_js_ctx_attr_is_attr_on_runpool(kbdev, KBASEP_JS_CTX_ATTR_COMPUTE_ALL_CORES) != MALI_FALSE) {
                /* SS-allcore state, and there's more than one coregroup */
                variants_supported = get_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_allcore_state, job_slot_idx);
        } else {
                /* SS-state */
                variants_supported = get_slot_to_variant_lookup(policy_info->slot_to_variant_lookup_ss_state, job_slot_idx);
        }

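        /* The two passes below implement strict class priority: any job from a
         * context whose owning process uses a realtime scheduling policy (e.g.
         * SCHED_FIFO or SCHED_RR) is preferred over every job from a
         * normal-policy context, regardless of accumulated runtimes. */
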
        /* On the first pass through the runpool, consider only the
         * realtime-priority contexts */
        list_for_each(pos, &policy_info->scheduled_ctxs_head) {
                kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy) {
                        if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
                                /* Realtime policy job matched */
                                return MALI_TRUE;
                        }
                }
        }

        /* On the second pass, consider the non-realtime priority contexts */
        list_for_each(pos, &policy_info->scheduled_ctxs_head) {
                kctx = list_entry(pos, kbase_context, jctx.sched_info.runpool.policy_ctx.cfs.list);
                if (kctx->jctx.sched_info.runpool.policy_ctx.cfs.process_rt_policy == MALI_FALSE) {
                        if (dequeue_job(kbdev, kctx, variants_supported, katom_ptr, job_slot_idx)) {
                                /* Non-realtime policy job matched */
                                return MALI_TRUE;
                        }
                }
        }

        /* By this point, no context had a matching job */
        return MALI_FALSE;
}

void kbasep_js_policy_enqueue_job(kbasep_js_policy *js_policy, kbase_jd_atom *katom)
{
        kbasep_js_policy_cfs_job *job_info;
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbase_context *parent_ctx;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
        parent_ctx = katom->kctx;
        KBASE_DEBUG_ASSERT(parent_ctx != NULL);

        job_info = &katom->sched_info.cfs;
        ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;

        {
                kbase_device *kbdev = container_of(js_policy, kbase_device, js_data.policy);

                KBASE_TRACE_ADD(kbdev, JS_POLICY_ENQUEUE_JOB, katom->kctx, katom, katom->jc, 0);
        }
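        /* Jobs are appended, so each per-variant list stays in submission
         * order; assuming dequeue_job() services lists from the head, a
         * context's jobs of a given variant therefore run oldest-first. */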
        list_add_tail(&katom->sched_info.cfs.list, &ctx_info->job_list_head[job_info->cached_variant_idx]);
}

void kbasep_js_policy_log_job_result(kbasep_js_policy *js_policy, kbase_jd_atom *katom, u64 time_spent_us)
{
        kbasep_js_policy_cfs_ctx *ctx_info;
        kbase_context *parent_ctx;

        KBASE_DEBUG_ASSERT(js_policy != NULL);
        KBASE_DEBUG_ASSERT(katom != NULL);
        CSTD_UNUSED(js_policy);

        parent_ctx = katom->kctx;
        KBASE_DEBUG_ASSERT(parent_ctx != NULL);

        ctx_info = &parent_ctx->jctx.sched_info.runpool.policy_ctx.cfs;

        ctx_info->runtime_us += priority_weight(ctx_info, time_spent_us);
}
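
/* Worked example (illustrative numbers): priority_weight() scales the measured
 * time by the context's fixed-point weight, so at a weight of 2.0 a job that
 * spent 1000us on the GPU is charged 2000us of runtime. A context with a
 * larger weight therefore accumulates charged runtime faster and must wait
 * longer before it is again the least-run context in its class. */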

mali_bool kbasep_js_policy_ctx_has_priority(kbasep_js_policy *js_policy, kbase_context *current_ctx, kbase_context *new_ctx)
{
        kbasep_js_policy_cfs_ctx *current_ctx_info;
        kbasep_js_policy_cfs_ctx *new_ctx_info;

        KBASE_DEBUG_ASSERT(current_ctx != NULL);
        KBASE_DEBUG_ASSERT(new_ctx != NULL);
        CSTD_UNUSED(js_policy);

        current_ctx_info = &current_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
        new_ctx_info = &new_ctx->jctx.sched_info.runpool.policy_ctx.cfs;
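
        /* A realtime-process context always outranks a normal one. Note that
         * bag_priority uses nice semantics (a numerically larger value means a
         * lower priority), so the second test grants priority to new_ctx when
         * its atoms are, on average, more important than current_ctx's. */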
        if ((current_ctx_info->process_rt_policy == MALI_FALSE) && (new_ctx_info->process_rt_policy == MALI_TRUE))
                return MALI_TRUE;

        if ((current_ctx_info->process_rt_policy == new_ctx_info->process_rt_policy) && (current_ctx_info->bag_priority > new_ctx_info->bag_priority))
                return MALI_TRUE;

        return MALI_FALSE;
}