3 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
19 * @file mali_kbase_replay.c
20 * Replay soft job handlers
23 #include <linux/dma-mapping.h>
24 #include <mali_kbase_config.h>
25 #include <mali_kbase.h>
26 #include <mali_kbase_mem.h>
27 #include <mali_kbase_mem_linux.h>
29 #define JOB_NOT_STARTED 0
30 #define JOB_TYPE_MASK 0xfe
31 #define JOB_TYPE_NULL (1 << 1)
32 #define JOB_TYPE_VERTEX (5 << 1)
33 #define JOB_TYPE_TILER (7 << 1)
34 #define JOB_TYPE_FUSED (8 << 1)
35 #define JOB_TYPE_FRAGMENT (9 << 1)
37 #define JOB_FLAG_DESC_SIZE (1 << 0)
38 #define JOB_FLAG_PERFORM_JOB_BARRIER (1 << 8)
40 #define JOB_HEADER_32_FBD_OFFSET (31*4)
41 #define JOB_HEADER_64_FBD_OFFSET (44*4)
43 #define FBD_POINTER_MASK (~0x3f)
45 #define SFBD_TILER_OFFSET (48*4)
47 #define MFBD_TILER_OFFSET (14*4)
49 #define FBD_HIERARCHY_WEIGHTS 8
50 #define FBD_HIERARCHY_MASK_MASK 0x1fff
54 #define HIERARCHY_WEIGHTS 13
56 #define JOB_HEADER_ID_MAX 0xffff
58 #define JOB_SOURCE_ID(status) (((status) >> 16) & 0xFFFF)
59 #define JOB_POLYGON_LIST (0x03)
63 u32 not_complete_index;
/*
 * dump_job_head - dump the fields of a job_head to the device debug log.
 * @kctx:     context whose device is used for logging
 * @head_str: label string printed before the field dump
 * (the job pointer parameter is on an original line not visible in this
 * extract — NOTE(review): several interior lines are missing here)
 *
 * Compiled in only under CONFIG_MALI_DEBUG; otherwise a no-op.
 */
79 static void dump_job_head(struct kbase_context *kctx, char *head_str,
82 #ifdef CONFIG_MALI_DEBUG
83 dev_dbg(kctx->kbdev->dev, "%s\n", head_str);
/* Single dev_dbg with a multi-line format string covering all fields. */
84 dev_dbg(kctx->kbdev->dev, "addr = %p\n"
86 "not_complete_index = %x\n"
90 "dependencies = %x,%x\n",
91 job, job->status, job->not_complete_index,
92 job->fault_addr, job->flags, job->index,
94 job->dependencies[1]);
/* The 'next' job pointer is 64-bit or 32-bit depending on the
 * descriptor-size flag in the job header. */
96 if (job->flags & JOB_FLAG_DESC_SIZE)
97 dev_dbg(kctx->kbdev->dev, "next = %llx\n",
100 dev_dbg(kctx->kbdev->dev, "next = %x\n",
/*
 * kbasep_replay_reset_sfbd - reset the tiler section of a Single FBD.
 * @kctx:            context pointer
 * @fbd_address:     GPU address of the FBD to reset
 * @tiler_heap_free: value to write to the FBD's Tiler Heap Free field
 * @hierarchy_mask:  new hierarchy mask; 0 leaves the weights untouched
 * @default_weight:  weight used for levels absent from the old mask
 *
 * Maps the SFBD's tiler structure (at SFBD_TILER_OFFSET from the FBD base)
 * into kernel space, rewrites the hierarchy mask/weights and the heap free
 * address, then unmaps.  Return value lines are not visible in this
 * extract; presumably 0 on success, negative on map failure — TODO confirm.
 */
105 static int kbasep_replay_reset_sfbd(struct kbase_context *kctx,
106 u64 fbd_address, u64 tiler_heap_free,
107 u16 hierarchy_mask, u32 default_weight)
113 u64 heap_free_address;
115 u32 weights[FBD_HIERARCHY_WEIGHTS];
117 struct kbase_vmap_struct map;
119 dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
/* Map just the tiler sub-structure of the FBD, not the whole FBD. */
121 fbd_tiler = kbase_vmap(kctx, fbd_address + SFBD_TILER_OFFSET,
122 sizeof(*fbd_tiler), &map);
124 dev_err(kctx->kbdev->dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
128 #ifdef CONFIG_MALI_DEBUG
129 dev_dbg(kctx->kbdev->dev,
132 "heap_free_address = %llx\n",
133 fbd_tiler->flags, fbd_tiler->heap_free_address);
/* Only rewrite the weight table when a new hierarchy mask was supplied. */
135 if (hierarchy_mask) {
136 u32 weights[HIERARCHY_WEIGHTS];
137 u16 old_hierarchy_mask = fbd_tiler->flags &
138 FBD_HIERARCHY_MASK_MASK;
/* Expand the packed per-level weights: levels present in the old mask
 * take their stored weight; the others get default_weight. */
141 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
142 if (old_hierarchy_mask & (1 << i)) {
143 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
144 weights[i] = fbd_tiler->weights[j++];
146 weights[i] = default_weight;
151 dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
152 old_hierarchy_mask, hierarchy_mask);
154 for (i = 0; i < HIERARCHY_WEIGHTS; i++)
155 dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
/* Re-pack the weights according to the new mask. */
160 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
161 if (hierarchy_mask & (1 << i)) {
162 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
164 dev_dbg(kctx->kbdev->dev, " Writing hierarchy level %02d (%08x) to %d\n",
167 fbd_tiler->weights[j++] = weights[i];
/* Zero the unused tail of the weight table. */
171 for (; j < FBD_HIERARCHY_WEIGHTS; j++)
172 fbd_tiler->weights[j] = 0;
/* Install the new mask; bit 16's meaning is not shown here — TODO confirm. */
174 fbd_tiler->flags = hierarchy_mask | (1 << 16);
177 fbd_tiler->heap_free_address = tiler_heap_free;
179 dev_dbg(kctx->kbdev->dev, "heap_free_address=%llx flags=%x\n",
180 fbd_tiler->heap_free_address, fbd_tiler->flags);
182 kbase_vunmap(kctx, &map);
/*
 * kbasep_replay_reset_mfbd - reset the tiler section of a Multi FBD.
 * @kctx:            context pointer
 * @fbd_address:     GPU address of the FBD to reset
 * @tiler_heap_free: value to write to the FBD's Tiler Heap Free field
 * @hierarchy_mask:  new hierarchy mask; 0 leaves the weights untouched
 * @default_weight:  weight used for levels absent from the old mask
 *
 * Same logic as kbasep_replay_reset_sfbd() but for the MFBD layout, whose
 * tiler structure sits at MFBD_TILER_OFFSET from the FBD base.
 */
187 static int kbasep_replay_reset_mfbd(struct kbase_context *kctx,
188 u64 fbd_address, u64 tiler_heap_free,
189 u16 hierarchy_mask, u32 default_weight)
191 struct kbase_vmap_struct map;
196 u64 heap_free_address;
198 u32 weights[FBD_HIERARCHY_WEIGHTS];
201 dev_dbg(kctx->kbdev->dev, "fbd_address: %llx\n", fbd_address);
/* Map only the MFBD's tiler sub-structure. */
203 fbd_tiler = kbase_vmap(kctx, fbd_address + MFBD_TILER_OFFSET,
204 sizeof(*fbd_tiler), &map);
206 dev_err(kctx->kbdev->dev,
207 "kbasep_replay_reset_fbd: failed to map fbd\n");
211 #ifdef CONFIG_MALI_DEBUG
212 dev_dbg(kctx->kbdev->dev, "FBD tiler:\n"
214 "heap_free_address = %llx\n",
216 fbd_tiler->heap_free_address);
/* Only rewrite the weight table when a new hierarchy mask was supplied. */
218 if (hierarchy_mask) {
219 u32 weights[HIERARCHY_WEIGHTS];
220 u16 old_hierarchy_mask = (fbd_tiler->flags) &
221 FBD_HIERARCHY_MASK_MASK;
/* Expand packed weights: keep stored weights for levels in the old
 * mask, default_weight for the rest. */
224 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
225 if (old_hierarchy_mask & (1 << i)) {
226 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
227 weights[i] = fbd_tiler->weights[j++];
229 weights[i] = default_weight;
234 dev_dbg(kctx->kbdev->dev, "Old hierarchy mask=%x New hierarchy mask=%x\n",
235 old_hierarchy_mask, hierarchy_mask);
237 for (i = 0; i < HIERARCHY_WEIGHTS; i++)
238 dev_dbg(kctx->kbdev->dev, " Hierarchy weight %02d: %08x\n",
/* Re-pack the weights according to the new mask. */
243 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
244 if (hierarchy_mask & (1 << i)) {
245 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
247 dev_dbg(kctx->kbdev->dev,
248 " Writing hierarchy level %02d (%08x) to %d\n",
251 fbd_tiler->weights[j++] = weights[i];
/* Zero the unused tail of the weight table. */
255 for (; j < FBD_HIERARCHY_WEIGHTS; j++)
256 fbd_tiler->weights[j] = 0;
/* Install the new mask; bit 16's meaning is not shown here — TODO confirm. */
258 fbd_tiler->flags = hierarchy_mask | (1 << 16);
261 fbd_tiler->heap_free_address = tiler_heap_free;
263 kbase_vunmap(kctx, &map);
269 * @brief Reset the status of an FBD pointed to by a tiler job
271 * This performs two functions:
272 * - Set the hierarchy mask
273 * - Reset the tiler free heap address
275 * @param[in] kctx Context pointer
276 * @param[in] job_header Address of job header to reset.
277 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
278 * @param[in] hierarchy_mask The hierarchy mask to use
279 * @param[in] default_weight Default hierarchy weight to write when no other
280 * weight is given in the FBD
281 * @param[in] job_64 true if this job is using 64-bit
284 * @return 0 on success, error code on failure
/*
 * kbasep_replay_reset_tiler_job - reset the FBD referenced by a tiler job.
 *
 * Reads the FBD pointer out of the tiler job's header — at the 64-bit or
 * 32-bit offset depending on @job_64 — then dispatches to the MFBD or
 * SFBD reset routine based on the FBD_TYPE bit of the pointer.  The low
 * bits of the pointer are masked off with FBD_POINTER_MASK before use.
 */
286 static int kbasep_replay_reset_tiler_job(struct kbase_context *kctx,
287 u64 job_header, u64 tiler_heap_free,
288 u16 hierarchy_mask, u32 default_weight, bool job_64)
290 struct kbase_vmap_struct map;
/* 64-bit descriptor: FBD pointer lives at JOB_HEADER_64_FBD_OFFSET. */
296 job_ext = kbase_vmap(kctx,
297 job_header + JOB_HEADER_64_FBD_OFFSET,
298 sizeof(*job_ext), &map);
301 dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
305 fbd_address = *job_ext;
307 kbase_vunmap(kctx, &map);
/* 32-bit descriptor: FBD pointer lives at JOB_HEADER_32_FBD_OFFSET. */
311 job_ext = kbase_vmap(kctx,
312 job_header + JOB_HEADER_32_FBD_OFFSET,
313 sizeof(*job_ext), &map);
316 dev_err(kctx->kbdev->dev, "kbasep_replay_reset_tiler_job: failed to map jc\n");
320 fbd_address = *job_ext;
322 kbase_vunmap(kctx, &map);
/* FBD_TYPE bit selects Multi vs Single FBD handling. */
325 if (fbd_address & FBD_TYPE) {
326 return kbasep_replay_reset_mfbd(kctx,
327 fbd_address & FBD_POINTER_MASK,
332 return kbasep_replay_reset_sfbd(kctx,
333 fbd_address & FBD_POINTER_MASK,
341 * @brief Reset the status of a job
343 * This performs the following functions:
345 * - Reset the Job Status field of each job to NOT_STARTED.
346 * - Set the Job Type field of any Vertex Jobs to Null Job.
347 * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
348 * the tiler_heap_free parameter, and set the hierarchy level mask to the
349 * hier_mask parameter.
350 * - Offset HW dependencies by the hw_job_id_offset parameter
351 * - Set the Perform Job Barrier flag if this job is the first in the chain
352 * - Read the address of the next job header
354 * @param[in] kctx Context pointer
355 * @param[in,out] job_header Address of job header to reset. Set to address
356 * of next job header on exit.
357 * @param[in] prev_jc Previous job chain to link to, if this job is
358 * the last in the chain.
359 * @param[in] hw_job_id_offset Offset for HW job IDs
360 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
361 * @param[in] hierarchy_mask The hierarchy mask to use
362 * @param[in] default_weight Default hierarchy weight to write when no other
363 * weight is given in the FBD
364 * @param[in] first_in_chain true if this job is the first in the chain
365 * @param[in] fragment_chain true if this job is in the fragment chain
367 * @return 0 on success, error code on failure
/*
 * kbasep_replay_reset_job - reset one job header so it can be replayed.
 *
 * See the block comment above for the full contract.  Summary of the
 * visible logic: map the header, reject jobs that are already NOT_STARTED
 * (outside the fragment chain) and fused jobs, offset HW job IDs and
 * dependencies (with overflow checks against JOB_HEADER_ID_MAX), relink
 * the 'next' pointer to @prev_jc, and reset any FBD referenced by tiler
 * or fragment jobs.  *@job_header is advanced to the next job on success.
 */
369 static int kbasep_replay_reset_job(struct kbase_context *kctx,
370 u64 *job_header, u64 prev_jc,
371 u64 tiler_heap_free, u16 hierarchy_mask,
372 u32 default_weight, u16 hw_job_id_offset,
373 bool first_in_chain, bool fragment_chain)
375 struct job_head *job;
377 struct kbase_vmap_struct map;
379 job = kbase_vmap(kctx, *job_header, sizeof(*job), &map);
381 dev_err(kctx->kbdev->dev,
382 "kbasep_replay_parse_jc: failed to map jc\n");
386 dump_job_head(kctx, "Job header:", job);
/* Outside the fragment chain, a job that is already NOT_STARTED is an
 * error (per the message below): it should have run before the fault. */
388 if (job->status == JOB_NOT_STARTED && !fragment_chain) {
389 dev_err(kctx->kbdev->dev, "Job already not started\n");
/* Force the job to be re-run from scratch. */
392 job->status = JOB_NOT_STARTED;
/* Vertex jobs are neutralised into null jobs for the replay. */
394 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_VERTEX)
395 job->flags = (job->flags & ~JOB_TYPE_MASK) | JOB_TYPE_NULL;
397 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FUSED) {
398 dev_err(kctx->kbdev->dev, "Fused jobs can not be replayed\n");
403 job->flags |= JOB_FLAG_PERFORM_JOB_BARRIER;
/* Reject if offsetting would push any ID past the 16-bit HW limit. */
405 if ((job->dependencies[0] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
406 (job->dependencies[1] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
407 (job->index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
408 dev_err(kctx->kbdev->dev,
409 "Job indicies/dependencies out of valid range\n");
/* Zero dependencies mean "none" and must stay zero. */
413 if (job->dependencies[0])
414 job->dependencies[0] += hw_job_id_offset;
415 if (job->dependencies[1])
416 job->dependencies[1] += hw_job_id_offset;
418 job->index += hw_job_id_offset;
/* Save the old 'next' pointer and relink this job to prev_jc, using the
 * 64- or 32-bit field per the descriptor-size flag. */
420 if (job->flags & JOB_FLAG_DESC_SIZE) {
421 new_job_header = job->next._64;
423 job->next._64 = prev_jc;
425 new_job_header = job->next._32;
427 job->next._32 = prev_jc;
429 dump_job_head(kctx, "Updated to:", job);
/* Tiler jobs: reset the FBD found via the job header. */
431 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_TILER) {
432 bool job_64 = (job->flags & JOB_FLAG_DESC_SIZE) != 0;
434 if (kbasep_replay_reset_tiler_job(kctx, *job_header,
435 tiler_heap_free, hierarchy_mask,
436 default_weight, job_64) != 0)
/* Fragment jobs: the FBD pointer is embedded in the header directly. */
439 } else if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FRAGMENT) {
442 if (job->flags & JOB_FLAG_DESC_SIZE)
443 fbd_address = job->fragment_fbd._64;
445 fbd_address = (u64)job->fragment_fbd._32;
447 if (fbd_address & FBD_TYPE) {
448 if (kbasep_replay_reset_mfbd(kctx,
449 fbd_address & FBD_POINTER_MASK,
452 default_weight) != 0)
455 if (kbasep_replay_reset_sfbd(kctx,
456 fbd_address & FBD_POINTER_MASK,
459 default_weight) != 0)
464 kbase_vunmap(kctx, &map);
/* Tell the caller where the next job in the chain lives. */
466 *job_header = new_job_header;
471 kbase_vunmap(kctx, &map);
476 * @brief Find the highest job ID in a job chain
478 * @param[in] kctx Context pointer
479 * @param[in] jc Job chain start address
480 * @param[out] hw_job_id Highest job ID in chain
482 * @return 0 on success, error code on failure
/*
 * kbasep_replay_find_hw_job_id - walk a job chain recording the highest
 * HW job index seen into *@hw_job_id.  Each header is mapped, inspected,
 * and unmapped in turn.  (The loop construct and the 32-bit 'next' branch
 * are on lines not visible in this extract.)
 */
484 static int kbasep_replay_find_hw_job_id(struct kbase_context *kctx,
485 u64 jc, u16 *hw_job_id)
488 struct job_head *job;
489 struct kbase_vmap_struct map;
491 dev_dbg(kctx->kbdev->dev,
492 "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
494 job = kbase_vmap(kctx, jc, sizeof(*job), &map);
496 dev_err(kctx->kbdev->dev, "failed to map jc\n");
/* Track the maximum index across the chain. */
501 if (job->index > *hw_job_id)
502 *hw_job_id = job->index;
/* Descriptor-size flag selects the 64- vs 32-bit 'next' field. */
504 if (job->flags & JOB_FLAG_DESC_SIZE)
509 kbase_vunmap(kctx, &map);
516 * @brief Reset the status of a number of jobs
518 * This function walks the provided job chain, and calls
519 * kbasep_replay_reset_job for each job. It also links the job chain to the
520 * provided previous job chain.
522 * The function will fail if any of the jobs passed already have a status of
522 * NOT_STARTED (outside the fragment chain).
525 * @param[in] kctx Context pointer
526 * @param[in] jc Job chain to be processed
527 * @param[in] prev_jc Job chain to be added to. May be NULL
528 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
529 * @param[in] hierarchy_mask The hierarchy mask to use
530 * @param[in] default_weight Default hierarchy weight to write when no other
531 * weight is given in the FBD
532 * @param[in] hw_job_id_offset Offset for HW job IDs
533 * @param[in] fragment_chain true if this chain is the fragment chain
535 * @return 0 on success, error code otherwise
/*
 * kbasep_replay_parse_jc - walk a job chain, resetting every job via
 * kbasep_replay_reset_job() and linking the chain onto @prev_jc.
 * See the block comment above for parameter semantics.  Fragment chains
 * are additionally bounded by BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT to guard
 * against malformed/looping chains.
 */
537 static int kbasep_replay_parse_jc(struct kbase_context *kctx,
539 u64 tiler_heap_free, u16 hierarchy_mask,
540 u32 default_weight, u16 hw_job_id_offset,
/* Only the first job in the chain gets the job-barrier treatment. */
543 bool first_in_chain = true;
546 dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
547 jc, hw_job_id_offset);
550 dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_jc: parsing jc=%llx\n", jc);
/* reset_job advances 'jc' to the next header on success. */
552 if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
553 tiler_heap_free, hierarchy_mask,
554 default_weight, hw_job_id_offset,
555 first_in_chain, fragment_chain) != 0)
558 first_in_chain = false;
/* Bail out if a fragment chain exceeds the job-count limit. */
561 if (fragment_chain &&
562 nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
563 dev_err(kctx->kbdev->dev,
564 "Exceeded maximum number of jobs in fragment chain\n");
573 * @brief Reset the status of a replay job, and set up dependencies
575 * This performs the actions to allow the replay job to be re-run following
576 * completion of the passed dependency.
578 * @param[in] katom The atom to be reset
579 * @param[in] dep_atom The dependency to be attached to the atom
/*
 * kbasep_replay_reset_softjob - requeue the replay soft job behind a new
 * dependency: mark @katom QUEUED, record a data dependency on @dep_atom
 * in slot 0, and add the atom to @dep_atom's dependent list so it is
 * released when the dependency completes.
 */
581 static void kbasep_replay_reset_softjob(struct kbase_jd_atom *katom,
582 struct kbase_jd_atom *dep_atom)
584 katom->status = KBASE_JD_ATOM_STATE_QUEUED;
585 kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
586 list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
590 * @brief Allocate an unused katom
592 * This will search the provided context for an unused katom, and will mark it
593 * as KBASE_JD_ATOM_STATE_QUEUED.
595 * If no atoms are available then the function will fail.
597 * @param[in] kctx Context pointer
598 * @return An atom ID, or -1 on failure
/*
 * kbasep_allocate_katom - find an UNUSED atom slot in @kctx and claim it
 * by marking it QUEUED.  Scans downward from the top of the atom table;
 * slot 0 is never used (loop stops at i > 0).  Returns the atom ID, or
 * -1 on exhaustion (failure return is on a line not visible here —
 * TODO confirm).
 */
600 static int kbasep_allocate_katom(struct kbase_context *kctx)
602 struct kbase_jd_context *jctx = &kctx->jctx;
605 for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
606 if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
/* Claim the slot immediately so a concurrent caller cannot take it;
 * locking context is not visible here — presumably jctx->lock held. */
607 jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
608 dev_dbg(kctx->kbdev->dev,
609 "kbasep_allocate_katom: Allocated atom %d\n",
619 * @brief Release a katom
621 * This will mark the provided atom as available, and remove any dependencies.
623 * For use on error path.
625 * @param[in] kctx Context pointer
626 * @param[in] atom_id ID of atom to release
/*
 * kbasep_release_katom - error-path release of an atom claimed by
 * kbasep_allocate_katom(): drain both dependency lists and return the
 * slot to UNUSED.
 */
628 static void kbasep_release_katom(struct kbase_context *kctx, int atom_id)
630 struct kbase_jd_context *jctx = &kctx->jctx;
632 dev_dbg(kctx->kbdev->dev, "kbasep_release_katom: Released atom %d\n",
/* Unlink every atom still waiting on either dependency slot. */
635 while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
636 list_del(jctx->atoms[atom_id].dep_head[0].next);
638 while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
639 list_del(jctx->atoms[atom_id].dep_head[1].next);
/* Slot is now reusable by kbasep_allocate_katom(). */
641 jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
/*
 * kbasep_replay_create_atom - initialise a base_jd_atom_v2 for replay:
 * no external resources, invalid (empty) pre-dependencies, zeroed udata,
 * and the given atom number.  (Priority/core_req assignments are on
 * lines not visible in this extract.)
 */
644 static void kbasep_replay_create_atom(struct kbase_context *kctx,
645 struct base_jd_atom_v2 *atom,
650 atom->extres_list.value = NULL;
653 atom->atom_number = atom_nr;
/* Both pre-deps start INVALID; the fragment atom's dependency on the
 * tiler atom is wired up later by the caller. */
655 base_jd_atom_dep_set(&atom->pre_dep[0], 0 , BASE_JD_DEP_TYPE_INVALID);
656 base_jd_atom_dep_set(&atom->pre_dep[1], 0 , BASE_JD_DEP_TYPE_INVALID);
658 atom->udata.blob[0] = 0;
659 atom->udata.blob[1] = 0;
663 * @brief Create two atoms for the purpose of replaying jobs
665 * Two atoms are allocated and created. The jc pointer is not set at this
666 * stage. The second atom has a dependency on the first. The remaining fields
667 * are set up as follows :
669 * - No external resources. Any required external resources will be held by the
671 * - device_nr is set to 0. This is not relevant as
672 * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
673 * - Priority is inherited from the replay job.
675 * @param[out] t_atom Atom to use for tiler jobs
676 * @param[out] f_atom Atom to use for fragment jobs
677 * @param[in] prio Priority of new atom (inherited from replay soft
679 * @return 0 on success, error code on failure
/*
 * kbasep_replay_create_atoms - allocate and initialise the tiler and
 * fragment replay atoms.  See the block comment above.  On failure to
 * allocate the second atom, the first is released again so no slots
 * leak.  The fragment atom is made data-dependent on the tiler atom.
 */
681 static int kbasep_replay_create_atoms(struct kbase_context *kctx,
682 struct base_jd_atom_v2 *t_atom,
683 struct base_jd_atom_v2 *f_atom,
686 int t_atom_nr, f_atom_nr;
688 t_atom_nr = kbasep_allocate_katom(kctx);
690 dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
694 f_atom_nr = kbasep_allocate_katom(kctx);
696 dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
/* Roll back the tiler atom if the fragment atom can't be allocated. */
697 kbasep_release_katom(kctx, t_atom_nr);
701 kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
702 kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
/* Fragment jobs must not start until the tiler jobs complete. */
704 base_jd_atom_dep_set(&f_atom->pre_dep[0], t_atom_nr , BASE_JD_DEP_TYPE_DATA);
709 #ifdef CONFIG_MALI_DEBUG
/*
 * payload_dump - debug-only helper that walks the tiler JC list in a
 * replay payload and logs each entry's jc/next pointers.  Each list node
 * is mapped, printed, and unmapped in turn.
 */
710 static void payload_dump(struct kbase_context *kctx, base_jd_replay_payload *payload)
714 dev_dbg(kctx->kbdev->dev, "Tiler jc list :\n");
715 next = payload->tiler_jc_list;
718 struct kbase_vmap_struct map;
719 base_jd_replay_jc *jc_struct;
/* Map one list node at a time; the list lives in GPU-visible memory. */
721 jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &map);
726 dev_dbg(kctx->kbdev->dev, "* jc_struct=%p jc=%llx next=%llx\n",
727 jc_struct, jc_struct->jc, jc_struct->next);
729 next = jc_struct->next;
731 kbase_vunmap(kctx, &map);
737 * @brief Parse a base_jd_replay_payload provided by userspace
739 * This will read the payload from userspace, and parse the job chains.
741 * @param[in] kctx Context pointer
742 * @param[in] replay_atom Replay soft job atom
743 * @param[in] t_atom Atom to use for tiler jobs
744 * @param[in] f_atom Atom to use for fragment jobs
745 * @return 0 on success, error code on failure
/*
 * kbasep_replay_parse_payload - read and validate the userspace replay
 * payload, then prepare the tiler and fragment job chains.
 * See the block comment above for parameter semantics.  Visible steps:
 * map the payload, copy/sanity-check core requirements, walk the tiler
 * JC list (resetting each chain and accumulating HW job ID offsets),
 * then reset the fragment chain, storing the chain heads in t_atom->jc
 * and f_atom->jc.
 */
747 static int kbasep_replay_parse_payload(struct kbase_context *kctx,
748 struct kbase_jd_atom *replay_atom,
749 struct base_jd_atom_v2 *t_atom,
750 struct base_jd_atom_v2 *f_atom)
752 base_jd_replay_payload *payload;
755 u16 hw_job_id_offset = 0;
757 struct kbase_vmap_struct map;
759 dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: replay_atom->jc = %llx sizeof(payload) = %zu\n",
760 replay_atom->jc, sizeof(payload));
/* The replay atom's jc points at the userspace-provided payload. */
762 payload = kbase_vmap(kctx, replay_atom->jc, sizeof(*payload), &map);
765 dev_err(kctx->kbdev->dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
769 #ifdef CONFIG_MALI_DEBUG
770 dev_dbg(kctx->kbdev->dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
771 dev_dbg(kctx->kbdev->dev, "Payload structure:\n"
772 "tiler_jc_list = %llx\n"
773 "fragment_jc = %llx\n"
774 "tiler_heap_free = %llx\n"
775 "fragment_hierarchy_mask = %x\n"
776 "tiler_hierarchy_mask = %x\n"
777 "hierarchy_default_weight = %x\n"
778 "tiler_core_req = %x\n"
779 "fragment_core_req = %x\n",
780 payload->tiler_jc_list,
781 payload->fragment_jc,
782 payload->tiler_heap_free,
783 payload->fragment_hierarchy_mask,
784 payload->tiler_hierarchy_mask,
785 payload->hierarchy_default_weight,
786 payload->tiler_core_req,
787 payload->fragment_core_req);
788 payload_dump(kctx, payload);
/* Replay atoms must never raise user events themselves. */
791 t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
792 f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
794 /* Sanity check core requirements*/
/* The payload comes from userspace: the tiler atom must be exactly a
 * tiler job, the fragment atom a fragment-shader job, and neither may
 * request external resources. */
795 if ((t_atom->core_req & BASEP_JD_REQ_ATOM_TYPE &
796 ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_T ||
797 (f_atom->core_req & BASEP_JD_REQ_ATOM_TYPE &
798 ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_FS ||
799 t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
800 f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
801 dev_err(kctx->kbdev->dev, "Invalid core requirements\n");
805 /* Process tiler job chains */
806 next = payload->tiler_jc_list;
808 dev_err(kctx->kbdev->dev, "Invalid tiler JC list\n");
/* Walk the linked list of tiler job chains. */
813 base_jd_replay_jc *jc_struct;
814 struct kbase_vmap_struct jc_map;
817 jc_struct = kbase_vmap(kctx, next, sizeof(*jc_struct), &jc_map);
820 dev_err(kctx->kbdev->dev, "Failed to map jc struct\n");
825 next = jc_struct->next;
829 kbase_vunmap(kctx, &jc_map);
832 u16 max_hw_job_id = 0;
/* Find the highest HW job ID in this chain so later chains can be
 * offset past it without ID collisions. */
834 if (kbasep_replay_find_hw_job_id(kctx, jc,
835 &max_hw_job_id) != 0)
838 if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
839 payload->tiler_heap_free,
840 payload->tiler_hierarchy_mask,
841 payload->hierarchy_default_weight,
842 hw_job_id_offset, false) != 0) {
846 hw_job_id_offset += max_hw_job_id;
851 t_atom->jc = prev_jc;
853 /* Process fragment job chain */
854 f_atom->jc = payload->fragment_jc;
/* Fragment chain: no prev chain, no ID offset, fragment limit applies. */
855 if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
856 payload->tiler_heap_free,
857 payload->fragment_hierarchy_mask,
858 payload->hierarchy_default_weight, 0,
/* Both chains must be non-empty for a valid replay. */
863 if (!t_atom->jc || !f_atom->jc) {
864 dev_err(kctx->kbdev->dev, "Invalid payload\n");
868 dev_dbg(kctx->kbdev->dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
869 t_atom->jc, f_atom->jc);
873 kbase_vunmap(kctx, &map);
/*
 * kbase_replay_process_worker - workqueue handler that performs a replay.
 * @data: embedded work_struct of the replay soft-job atom
 *
 * Under jctx->lock: create the tiler/fragment atoms, parse the payload,
 * requeue the replay soft job behind the fragment atom, and submit both
 * new atoms.  Any failure releases the allocated atoms, cancels the
 * replay atom, and (per the failure path at the bottom) drops the
 * disjoint state and completes the atom via jd_done_nolock().
 */
878 static void kbase_replay_process_worker(struct work_struct *data)
880 struct kbase_jd_atom *katom;
881 struct kbase_context *kctx;
882 struct kbase_jd_context *jctx;
883 bool need_to_try_schedule_context = false;
885 struct base_jd_atom_v2 t_atom, f_atom;
886 struct kbase_jd_atom *t_katom, *f_katom;
887 base_jd_prio atom_prio;
/* Recover the atom from its embedded work item. */
889 katom = container_of(data, struct kbase_jd_atom, work);
893 mutex_lock(&jctx->lock);
/* New atoms inherit the replay job's scheduling priority. */
895 atom_prio = kbasep_js_sched_prio_to_atom_prio(katom->sched_priority);
897 if (kbasep_replay_create_atoms(
898 kctx, &t_atom, &f_atom, atom_prio) != 0) {
899 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
903 t_katom = &jctx->atoms[t_atom.atom_number];
904 f_katom = &jctx->atoms[f_atom.atom_number];
906 if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) != 0) {
/* Payload invalid: release both atoms and cancel the replay. */
907 kbasep_release_katom(kctx, t_atom.atom_number);
908 kbasep_release_katom(kctx, f_atom.atom_number);
909 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
/* Replay soft job completes only after the fragment atom does. */
913 kbasep_replay_reset_softjob(katom, f_katom);
915 need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
916 if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
917 dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
/* Tiler atom was consumed by jd_submit_atom; only f remains to free. */
918 kbasep_release_katom(kctx, f_atom.atom_number);
919 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
922 need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
923 if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
924 dev_err(kctx->kbdev->dev, "Replay failed to submit atom\n");
925 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
929 katom->event_code = BASE_JD_EVENT_DONE;
/* Failure path: undo the disjoint state and finish the atom now. */
932 if (katom->event_code != BASE_JD_EVENT_DONE) {
933 kbase_disjoint_state_down(kctx->kbdev);
935 need_to_try_schedule_context |= jd_done_nolock(katom);
938 if (need_to_try_schedule_context)
939 kbase_js_sched_all(kctx->kbdev);
941 mutex_unlock(&jctx->lock);
945 * @brief Check job replay fault
947 * This will read the job payload, checks fault type and source, then decides
948 * whether replay is required.
950 * @param[in] katom The atom to be processed
951 * @return true (success) if replay required or false on failure.
/*
 * kbase_replay_fault_check - decide whether @katom's fault warrants replay.
 *
 * Visible decision tree: TERMINATED and JOB_WRITE_FAULT are handled in
 * branches whose results are on missing lines; FORCE_REPLAY is converted
 * to DATA_INVALID_FAULT; any other non-DATA_INVALID_FAULT code means no
 * replay.  For DATA_INVALID_FAULT, the fragment job chain is scanned and
 * replay is approved only if the fault source is the POLYGON_LIST reader.
 * The scan also detects a looping job chain via job_loop_detect.
 */
953 static bool kbase_replay_fault_check(struct kbase_jd_atom *katom)
955 struct kbase_context *kctx = katom->kctx;
956 struct device *dev = kctx->kbdev->dev;
957 base_jd_replay_payload *payload;
960 struct job_head *job;
961 struct kbase_vmap_struct job_map;
962 struct kbase_vmap_struct map;
965 /* Replay job if fault is of type BASE_JD_EVENT_JOB_WRITE_FAULT or
966 * if force_replay is enabled.
968 if (BASE_JD_EVENT_TERMINATED == katom->event_code) {
970 } else if (BASE_JD_EVENT_JOB_WRITE_FAULT == katom->event_code) {
972 } else if (BASE_JD_EVENT_FORCE_REPLAY == katom->event_code) {
/* Forced replay: masquerade as a data-invalid fault from here on. */
973 katom->event_code = BASE_JD_EVENT_DATA_INVALID_FAULT;
975 } else if (BASE_JD_EVENT_DATA_INVALID_FAULT != katom->event_code) {
976 /* No replay for faults of type other than
977 * BASE_JD_EVENT_DATA_INVALID_FAULT.
982 /* Job fault is BASE_JD_EVENT_DATA_INVALID_FAULT, now scan fragment jc
983 * to find out whether the source of exception is POLYGON_LIST. Replay
984 * is required if the source of fault is POLYGON_LIST.
986 payload = kbase_vmap(kctx, katom->jc, sizeof(*payload), &map);
988 dev_err(dev, "kbase_replay_fault_check: failed to map payload.\n");
992 #ifdef CONFIG_MALI_DEBUG
993 dev_dbg(dev, "kbase_replay_fault_check: payload=%p\n", payload);
994 dev_dbg(dev, "\nPayload structure:\n"
995 "fragment_jc = 0x%llx\n"
996 "fragment_hierarchy_mask = 0x%x\n"
997 "fragment_core_req = 0x%x\n",
998 payload->fragment_jc,
999 payload->fragment_hierarchy_mask,
1000 payload->fragment_core_req);
1002 /* Process fragment job chain */
1003 job_header = (u64) payload->fragment_jc;
/* Remember the chain head so a cycle back to it can be detected. */
1004 job_loop_detect = job_header;
1005 while (job_header) {
1006 job = kbase_vmap(kctx, job_header, sizeof(*job), &job_map);
1008 dev_err(dev, "failed to map jc\n");
/* Also drop the payload mapping on this error path. */
1010 kbase_vunmap(kctx, &map);
1015 #ifdef CONFIG_MALI_DEBUG
1016 dev_dbg(dev, "\njob_head structure:\n"
1017 "Source ID:0x%x Access:0x%x Exception:0x%x\n"
1018 "at job addr = %p\n"
1019 "not_complete_index = 0x%x\n"
1020 "fault_addr = 0x%llx\n"
1023 "dependencies = 0x%x,0x%x\n",
1024 JOB_SOURCE_ID(job->status),
1025 ((job->status >> 8) & 0x3),
1026 (job->status & 0xFF),
1028 job->not_complete_index,
1032 job->dependencies[0],
1033 job->dependencies[1]);
1036 /* Replay only when the polygon list reader caused the
1037 * DATA_INVALID_FAULT */
1038 if ((BASE_JD_EVENT_DATA_INVALID_FAULT == katom->event_code) &&
1039 (JOB_POLYGON_LIST == JOB_SOURCE_ID(job->status))) {
1041 kbase_vunmap(kctx, &job_map);
1045 /* Move on to next fragment job in the list */
1046 if (job->flags & JOB_FLAG_DESC_SIZE)
1047 job_header = job->next._64;
1049 job_header = job->next._32;
1051 kbase_vunmap(kctx, &job_map);
1053 /* Job chain loop detected */
1054 if (job_header == job_loop_detect)
1059 kbase_vunmap(kctx, &map);
1066 * @brief Process a replay job
1068 * Called from kbase_process_soft_job.
1070 * On exit, if the job has completed, katom->event_code will have been updated.
1071 * If the job has not completed, and is replaying jobs, then the atom status
1072 * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
1074 * @param[in] katom The atom to be processed
1075 * @return false if the atom has completed
1076 * true if the atom is replaying jobs
/*
 * kbase_replay_process - entry point for a replay soft job (called from
 * kbase_process_soft_job; see the block comment above).
 *
 * Visible flow: no replay if the previous job succeeded or the context
 * is dying (undoing the disjoint state if a replay was in progress), nor
 * if kbase_replay_fault_check() rejects the fault.  Otherwise bump
 * retry_count, fail permanently past BASEP_JD_REPLAY_LIMIT, enter the
 * disjoint state on the first retry only, and hand the actual replay to
 * kbase_replay_process_worker() on the context's event workqueue.
 */
1078 bool kbase_replay_process(struct kbase_jd_atom *katom)
1080 struct kbase_context *kctx = katom->kctx;
1081 struct kbase_jd_context *jctx = &kctx->jctx;
1083 if (katom->event_code == BASE_JD_EVENT_DONE) {
1084 dev_dbg(kctx->kbdev->dev, "Previous job succeeded - not replaying\n");
/* retry_count != 0 means a replay was ongoing: leave disjoint state. */
1086 if (katom->retry_count)
1087 kbase_disjoint_state_down(kctx->kbdev);
1092 if (jctx->sched_info.ctx.is_dying) {
1093 dev_dbg(kctx->kbdev->dev, "Not replaying; context is dying\n");
1095 if (katom->retry_count)
1096 kbase_disjoint_state_down(kctx->kbdev);
1101 /* Check job exception type and source before replaying. */
1102 if (!kbase_replay_fault_check(katom)) {
1103 dev_dbg(kctx->kbdev->dev,
1104 "Replay cancelled on event %x\n", katom->event_code);
1105 /* katom->event_code is already set to the failure code of the
1111 dev_warn(kctx->kbdev->dev, "Replaying jobs retry=%d\n",
1112 katom->retry_count);
1114 katom->retry_count++;
/* Give up after too many replay attempts of the same job. */
1116 if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
1117 dev_err(kctx->kbdev->dev, "Replay exceeded limit - failing jobs\n");
1119 kbase_disjoint_state_down(kctx->kbdev);
1121 /* katom->event_code is already set to the failure code of the
1126 /* only enter the disjoint state once for the whole time while the replay is ongoing */
1127 if (katom->retry_count == 1)
1128 kbase_disjoint_state_up(kctx->kbdev);
/* Defer the heavy lifting to the worker on the context's workqueue. */
1130 INIT_WORK(&katom->work, kbase_replay_process_worker);
1131 queue_work(kctx->event_workq, &katom->work);