3 * (C) COPYRIGHT ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
19 * @file mali_kbase_replay.c
20 * Replay soft job handlers
23 #include <mali_kbase_config.h>
24 #include <mali_kbase.h>
25 #include <mali_kbase_mem.h>
26 #include <mali_kbase_debug.h>
/* Job status value meaning the hardware has not yet started the job. */
28 #define JOB_NOT_STARTED 0
/* Mask and values for the job-type field within a job header's flags word. */
30 #define JOB_TYPE_MASK 0xfe
31 #define JOB_TYPE_NULL (1 << 1)
32 #define JOB_TYPE_VERTEX (5 << 1)
33 #define JOB_TYPE_TILER (7 << 1)
34 #define JOB_TYPE_FUSED (8 << 1)
35 #define JOB_TYPE_FRAGMENT (9 << 1)
/* Job header flag bits: 64-bit descriptor layout, and perform-job-barrier. */
37 #define JOB_FLAG_DESC_SIZE (1 << 0)
38 #define JOB_FLAG_PERFORM_JOB_BARRIER (1 << 8)
/* Byte offset of the FBD pointer inside a 32-bit job header. */
40 #define JOB_HEADER_32_FBD_OFFSET (31*4)
/* The low bits of an FBD address carry type/flag information; mask them off
 * to recover the actual descriptor pointer. */
42 #define FBD_POINTER_MASK (~0x3f)
/* Byte offset of the tiler section within an SFBD. */
44 #define SFBD_TILER_OFFSET (48*4)
/* Byte offsets of the tiler flags word and the tiler section within an MFBD.
 * Note these can land on different pages (see kbasep_replay_reset_mfbd). */
46 #define MFBD_TILER_FLAGS_OFFSET (15*4)
47 #define MFBD_TILER_OFFSET (16*4)
/* Number of hierarchy-weight slots stored in an FBD, and the mask covering
 * the hierarchy bits of the FBD flags word. */
49 #define FBD_HIERARCHY_WEIGHTS 8
50 #define FBD_HIERARCHY_MASK_MASK 0x1fff
/* Total number of hierarchy levels addressable by a hierarchy mask. */
54 #define HIERARCHY_WEIGHTS 13
/* Maximum valid value for a HW job ID / dependency index in a job header. */
56 #define JOB_HEADER_ID_MAX 0xffff
/* CPU-side view of a GPU job header as manipulated by the replay code.
 * NOTE(review): only part of this struct is visible here; code below also
 * accesses status, flags, index, dependencies[], next._32/._64 and
 * fragment_fbd._32 — confirm the full layout against the original file. */
58 typedef struct job_head
61 u32 not_complete_index;
/* Dump a job header to the kernel log; compiled in only on debug builds. */
79 static void dump_job_head(kbase_context *kctx, char *head_str, job_head *job)
81 #ifdef CONFIG_MALI_DEBUG
82 struct device *dev = kctx->kbdev->dev;
84 KBASE_LOG(2, dev, "%s\n", head_str);
85 KBASE_LOG(2, dev, "addr = %p\n"
87 "not_complete_index = %x\n"
91 "dependencies = %x,%x\n",
94 job->not_complete_index,
99 job->dependencies[1]);
/* The width of the 'next' job pointer depends on the descriptor-size flag. */
101 if (job->flags & JOB_FLAG_DESC_SIZE)
102 KBASE_LOG(2, dev, "next = %llx\n", job->next._64);
104 KBASE_LOG(2, dev, "next = %x\n", job->next._32);
/* Map the page of GPU memory containing gpu_addr into kernel space.
 *
 * Returns a CPU pointer to the byte at gpu_addr, or NULL on failure.
 * On success, *phys_addr is set to the page's physical address.
 * Uses kmap_atomic, so the caller must not sleep before calling
 * kbasep_unmap_page.
 */
109 static void *kbasep_map_page(kbase_context *kctx, mali_addr64 gpu_addr,
112 void *cpu_addr = NULL;
114 kbase_va_region *region;
115 phys_addr_t *page_array;
117 region = kbase_region_tracker_find_region_enclosing_address(kctx,
/* Reject addresses that do not fall inside a live (non-free) region. */
119 if (!region || (region->flags & KBASE_REG_FREE))
122 page_index = (gpu_addr >> PAGE_SHIFT) - region->start_pfn;
/* Reject pages beyond the region's currently backed size. */
123 if (page_index >= kbase_reg_current_backed_size(region))
126 page_array = kbase_get_phy_pages(region);
130 cpu_addr = kmap_atomic(pfn_to_page(PFN_DOWN(page_array[page_index])));
135 *phys_addr = page_array[page_index];
/* Offset the mapping to gpu_addr's position within the page. */
137 return cpu_addr + (gpu_addr & ~PAGE_MASK);
/* As kbasep_map_page, but additionally syncs the page contents from the GPU
 * to the CPU so reads observe up-to-date data. Pair with
 * kbasep_unmap_page_sync to write changes back. */
140 static void *kbasep_map_page_sync(kbase_context *kctx, mali_addr64 gpu_addr,
143 void *cpu_addr = kbasep_map_page(kctx, gpu_addr, phys_addr);
/* Sync the whole page containing the mapping. */
148 kbase_sync_to_cpu(*phys_addr,
149 (void *)((uintptr_t)cpu_addr & PAGE_MASK),
/* Undo kbasep_map_page: unmap the page that contains cpu_addr. */
155 static void kbasep_unmap_page(void *cpu_addr)
157 kunmap_atomic((void *)((uintptr_t)cpu_addr & PAGE_MASK));
/* Undo kbasep_map_page_sync: flush CPU-side writes back to memory so the
 * GPU observes them, then unmap the page containing cpu_addr. */
160 static void kbasep_unmap_page_sync(void *cpu_addr, u64 phys_addr)
162 kbase_sync_to_memory(phys_addr,
163 (void *)((uintptr_t)cpu_addr & PAGE_MASK),
166 kunmap_atomic((void *)((uintptr_t)cpu_addr & PAGE_MASK));
/* Reset an SFBD (single-sample framebuffer descriptor) for replay:
 * rewrite the hierarchy mask and weights in its tiler section, and reset
 * the tiler heap free address to tiler_heap_free.
 * Returns MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED otherwise. */
169 static mali_error kbasep_replay_reset_sfbd(kbase_context *kctx,
170 mali_addr64 fbd_address,
171 mali_addr64 tiler_heap_free,
181 u64 heap_free_address;
183 u32 weights[FBD_HIERARCHY_WEIGHTS];
185 struct device *dev = kctx->kbdev->dev;
187 KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
/* Map the FBD's tiler section with CPU/GPU sync so we can modify it. */
189 fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + SFBD_TILER_OFFSET,
192 dev_err(dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
193 return MALI_ERROR_FUNCTION_FAILED;
195 #ifdef CONFIG_MALI_DEBUG
196 KBASE_LOG(2, dev, "FBD tiler:\n"
198 "heap_free_address = %llx\n",
200 fbd_tiler->heap_free_address);
202 if (hierarchy_mask) {
203 u32 weights[HIERARCHY_WEIGHTS];
204 u16 old_hierarchy_mask = fbd_tiler->flags &
205 FBD_HIERARCHY_MASK_MASK;
/* Expand the packed weight array: the FBD stores one weight per set
 * bit of the old mask; levels not in the mask take default_weight. */
208 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
209 if (old_hierarchy_mask & (1 << i)) {
210 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
211 weights[i] = fbd_tiler->weights[j++];
213 weights[i] = default_weight;
219 "Old hierarchy mask=%x New hierarchy mask=%x\n",
220 old_hierarchy_mask, hierarchy_mask);
221 for (i = 0; i < HIERARCHY_WEIGHTS; i++)
222 KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
/* Re-pack the expanded weights according to the new hierarchy mask. */
227 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
228 if (hierarchy_mask & (1 << i)) {
229 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
232 " Writing hierarchy level %02d (%08x) to %d\n",
235 fbd_tiler->weights[j++] = weights[i];
/* Zero out any weight slots left unused by the new mask. */
239 for (; j < FBD_HIERARCHY_WEIGHTS; j++)
240 fbd_tiler->weights[j] = 0;
/* NOTE(review): bit 16 is set alongside the new mask — presumably a
 * required flag in the FBD tiler flags word; confirm against the FBD
 * layout documentation. */
242 fbd_tiler->flags = hierarchy_mask | (1 << 16);
245 fbd_tiler->heap_free_address = tiler_heap_free;
247 KBASE_LOG(2, dev, "heap_free_address=%llx flags=%x\n",
248 fbd_tiler->heap_free_address, fbd_tiler->flags);
/* Write the modified tiler section back to memory for the GPU. */
250 kbasep_unmap_page_sync(fbd_tiler, phys_addr);
252 return MALI_ERROR_NONE;
/* Reset an MFBD (multi-sample framebuffer descriptor) for replay:
 * rewrite the hierarchy mask and weights and reset the tiler heap free
 * address. Unlike the SFBD case, the MFBD's tiler flags word sits at a
 * different offset than the tiler section and may therefore land on a
 * different page, requiring a second mapping.
 * Returns MALI_ERROR_NONE on success, MALI_ERROR_FUNCTION_FAILED otherwise. */
255 static mali_error kbasep_replay_reset_mfbd(kbase_context *kctx,
256 mali_addr64 fbd_address,
257 mali_addr64 tiler_heap_free,
261 u64 phys_addr, phys_addr_flags;
265 u64 heap_free_address;
267 u32 weights[FBD_HIERARCHY_WEIGHTS];
269 u32 *fbd_tiler_flags;
270 mali_bool flags_different_page;
271 struct device *dev = kctx->kbdev->dev;
273 KBASE_LOG(2, dev, "fbd_address: %llx\n", fbd_address);
275 fbd_tiler = kbasep_map_page_sync(kctx, fbd_address + MFBD_TILER_OFFSET,
/* If the flags word lives on a different page than the tiler section,
 * map it separately; otherwise derive its pointer from fbd_tiler. */
277 if (((fbd_address + MFBD_TILER_OFFSET) & PAGE_MASK) !=
278 ((fbd_address + MFBD_TILER_FLAGS_OFFSET) & PAGE_MASK)) {
279 flags_different_page = MALI_TRUE;
280 fbd_tiler_flags = kbasep_map_page_sync(kctx,
281 fbd_address + MFBD_TILER_FLAGS_OFFSET,
284 flags_different_page = MALI_FALSE;
285 fbd_tiler_flags = (u32 *)((uintptr_t)fbd_tiler -
286 MFBD_TILER_OFFSET + MFBD_TILER_FLAGS_OFFSET);
/* On mapping failure, unmap whichever pages did map before bailing. */
289 if (!fbd_tiler || !fbd_tiler_flags) {
290 dev_err(dev, "kbasep_replay_reset_fbd: failed to map fbd\n");
292 if (fbd_tiler_flags && flags_different_page)
293 kbasep_unmap_page_sync(fbd_tiler_flags,
296 kbasep_unmap_page_sync(fbd_tiler, phys_addr);
298 return MALI_ERROR_FUNCTION_FAILED;
300 #ifdef CONFIG_MALI_DEBUG
301 KBASE_LOG(2, dev, "FBD tiler:\n"
302 "heap_free_address = %llx\n",
303 fbd_tiler->heap_free_address);
305 if (hierarchy_mask) {
306 u32 weights[HIERARCHY_WEIGHTS];
307 u16 old_hierarchy_mask = (*fbd_tiler_flags) &
308 FBD_HIERARCHY_MASK_MASK;
/* Expand the packed weight array: one stored weight per set bit of
 * the old mask; unset levels take default_weight. */
311 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
312 if (old_hierarchy_mask & (1 << i)) {
313 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
314 weights[i] = fbd_tiler->weights[j++];
317 weights[i] = default_weight;
322 "Old hierarchy mask=%x New hierarchy mask=%x\n",
323 old_hierarchy_mask, hierarchy_mask);
324 for (i = 0; i < HIERARCHY_WEIGHTS; i++)
325 KBASE_LOG(2, dev, " Hierarchy weight %02d: %08x\n",
/* Re-pack the expanded weights according to the new hierarchy mask. */
330 for (i = 0; i < HIERARCHY_WEIGHTS; i++) {
331 if (hierarchy_mask & (1 << i)) {
332 KBASE_DEBUG_ASSERT(j < FBD_HIERARCHY_WEIGHTS);
335 " Writing hierarchy level %02d (%08x) to %d\n",
338 fbd_tiler->weights[j++] = weights[i];
/* Zero out any weight slots left unused by the new mask. */
342 for (; j < FBD_HIERARCHY_WEIGHTS; j++)
343 fbd_tiler->weights[j] = 0;
/* NOTE(review): bit 16 set alongside the new mask, mirroring the SFBD
 * path — confirm its meaning against the FBD layout documentation. */
345 *fbd_tiler_flags = hierarchy_mask | (1 << 16);
348 fbd_tiler->heap_free_address = tiler_heap_free;
/* Write back both mappings (flags page only if separately mapped). */
350 if (flags_different_page)
351 kbasep_unmap_page_sync(fbd_tiler_flags, phys_addr_flags);
353 kbasep_unmap_page_sync(fbd_tiler, phys_addr);
355 return MALI_ERROR_NONE;
359 * @brief Reset the status of an FBD pointed to by a tiler job
361 * This performs two functions :
362 * - Set the hierarchy mask
363 * - Reset the tiler free heap address
365 * @param[in] kctx Context pointer
366 * @param[in] job_header Address of job header to reset.
367 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
368 * @param[in] hierarchy_mask The hierarchy mask to use
369 * @param[in] default_weight Default hierarchy weight to write when no other
370 * weight is given in the FBD
371 * @param[in] job_64 MALI_TRUE if this job is using 64-bit
374 * @return MALI_ERROR_NONE on success, error code on failure
376 static mali_error kbasep_replay_reset_tiler_job(kbase_context *kctx,
377 mali_addr64 job_header,
378 mali_addr64 tiler_heap_free,
383 mali_addr64 fbd_address;
/* Only 32-bit job descriptors are supported by the replay path. */
386 dev_err(kctx->kbdev->dev,
387 "64-bit job descriptor not supported\n");
388 return MALI_ERROR_FUNCTION_FAILED;
/* Read the FBD address out of the 32-bit job header. */
392 job_ext = kbasep_map_page(kctx,
393 job_header + JOB_HEADER_32_FBD_OFFSET,
396 dev_err(kctx->kbdev->dev,
397 "kbasep_replay_reset_tiler_job: failed to map jc\n");
398 return MALI_ERROR_FUNCTION_FAILED;
401 fbd_address = *job_ext;
403 kbasep_unmap_page(job_ext);
/* Dispatch on the FBD type bit encoded in the descriptor address. */
406 if (fbd_address & FBD_TYPE) {
407 return kbasep_replay_reset_mfbd(kctx,
408 fbd_address & FBD_POINTER_MASK,
413 return kbasep_replay_reset_sfbd(kctx,
414 fbd_address & FBD_POINTER_MASK,
422 * @brief Reset the status of a job
424 * This performs the following functions :
426 * - Reset the Job Status field of each job to NOT_STARTED.
427 * - Set the Job Type field of any Vertex Jobs to Null Job.
428 * - For any jobs using an FBD, set the Tiler Heap Free field to the value of
429 * the tiler_heap_free parameter, and set the hierarchy level mask to the
430 * hier_mask parameter.
431 * - Offset HW dependencies by the hw_job_id_offset parameter
432 * - Set the Perform Job Barrier flag if this job is the first in the chain
433 * - Read the address of the next job header
435 * @param[in] kctx Context pointer
436 * @param[in,out] job_header Address of job header to reset. Set to address
437 * of next job header on exit.
438 * @param[in] prev_jc Previous job chain to link to, if this job is
439 * the last in the chain.
440 * @param[in] hw_job_id_offset Offset for HW job IDs
441 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
442 * @param[in] hierarchy_mask The hierarchy mask to use
443 * @param[in] default_weight Default hierarchy weight to write when no other
444 * weight is given in the FBD
445 * @param[in] first_in_chain MALI_TRUE if this job is the first in the chain
446 * @param[in] fragment_chain MALI_TRUE if this job is in the fragment chain
448 * @return MALI_ERROR_NONE on success, error code on failure
450 static mali_error kbasep_replay_reset_job(kbase_context *kctx,
451 mali_addr64 *job_header,
453 mali_addr64 tiler_heap_free,
456 u16 hw_job_id_offset,
457 mali_bool first_in_chain,
458 mali_bool fragment_chain)
462 mali_addr64 new_job_header;
463 struct device *dev = kctx->kbdev->dev;
465 job = kbasep_map_page_sync(kctx, *job_header, &phys_addr);
467 dev_err(dev, "kbasep_replay_parse_jc: failed to map jc: failed to map jc\n");
468 return MALI_ERROR_FUNCTION_FAILED;
471 dump_job_head(kctx, "Job header:", job);
/* A non-fragment job already in NOT_STARTED state indicates the chain was
 * never run, so replay is invalid. */
473 if (job->status == JOB_NOT_STARTED && !fragment_chain) {
474 dev_err(dev, "Job already not started\n");
475 kbasep_unmap_page_sync(job, phys_addr);
476 return MALI_ERROR_FUNCTION_FAILED;
478 job->status = JOB_NOT_STARTED;
/* Vertex jobs are converted to Null jobs so they are not re-executed. */
480 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_VERTEX)
481 job->flags = (job->flags & ~JOB_TYPE_MASK) | JOB_TYPE_NULL;
483 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FUSED) {
484 dev_err(dev, "Fused jobs can not be replayed\n");
485 kbasep_unmap_page_sync(job, phys_addr);
486 return MALI_ERROR_FUNCTION_FAILED;
490 job->flags |= JOB_FLAG_PERFORM_JOB_BARRIER;
/* Ensure offsetting dependencies/index cannot overflow the 16-bit HW ID
 * range. */
492 if ((job->dependencies[0] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
493 (job->dependencies[1] + hw_job_id_offset) > JOB_HEADER_ID_MAX ||
494 (job->index + hw_job_id_offset) > JOB_HEADER_ID_MAX) {
495 dev_err(dev, "Job indicies/dependencies out of valid range\n");
496 kbasep_unmap_page_sync(job, phys_addr);
497 return MALI_ERROR_FUNCTION_FAILED;
/* Zero dependencies mean "no dependency" and must stay zero. */
500 if (job->dependencies[0])
501 job->dependencies[0] += hw_job_id_offset;
502 if (job->dependencies[1])
503 job->dependencies[1] += hw_job_id_offset;
505 job->index += hw_job_id_offset;
/* Record the original next-job pointer, then relink this job to prev_jc. */
507 if (job->flags & JOB_FLAG_DESC_SIZE) {
508 new_job_header = job->next._64;
510 job->next._64 = prev_jc;
512 new_job_header = job->next._32;
514 job->next._32 = prev_jc;
516 dump_job_head(kctx, "Updated to:", job);
518 if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_TILER) {
519 kbasep_unmap_page_sync(job, phys_addr);
/* Tiler jobs carry an FBD that must itself be reset for replay. */
520 if (kbasep_replay_reset_tiler_job(kctx, *job_header,
521 tiler_heap_free, hierarchy_mask,
523 job->flags & JOB_FLAG_DESC_SIZE) !=
525 return MALI_ERROR_FUNCTION_FAILED;
527 } else if ((job->flags & JOB_TYPE_MASK) == JOB_TYPE_FRAGMENT) {
530 if (job->flags & JOB_FLAG_DESC_SIZE) {
531 kbasep_unmap_page_sync(job, phys_addr);
532 dev_err(dev, "64-bit job descriptor not supported\n");
533 return MALI_ERROR_FUNCTION_FAILED;
535 fbd_address = (u64)job->fragment_fbd._32;
538 kbasep_unmap_page_sync(job, phys_addr);
/* Dispatch to the MFBD/SFBD reset path by the FBD type bit. */
540 if (fbd_address & FBD_TYPE) {
541 if (kbasep_replay_reset_mfbd(kctx,
542 fbd_address & FBD_POINTER_MASK,
547 return MALI_ERROR_FUNCTION_FAILED;
549 if (kbasep_replay_reset_sfbd(kctx,
550 fbd_address & FBD_POINTER_MASK,
555 return MALI_ERROR_FUNCTION_FAILED;
558 kbasep_unmap_page_sync(job, phys_addr);
/* Report the following job in the chain back to the caller. */
561 *job_header = new_job_header;
563 return MALI_ERROR_NONE;
567 * @brief Find the highest job ID in a job chain
569 * @param[in] kctx Context pointer
570 * @param[in] jc Job chain start address
571 * @param[out] hw_job_id Highest job ID in chain
573 * @return MALI_ERROR_NONE on success, error code on failure
575 static mali_error kbasep_replay_find_hw_job_id(kbase_context *kctx,
583 KBASE_LOG(2, kctx->kbdev->dev,
584 "kbasep_replay_find_hw_job_id: parsing jc=%llx\n", jc);
586 job = kbasep_map_page_sync(kctx, jc, &phys_addr);
588 dev_err(kctx->kbdev->dev, "failed to map jc\n");
590 return MALI_ERROR_FUNCTION_FAILED;
/* Track the running maximum HW job ID across the chain. */
593 if (job->index > *hw_job_id)
594 *hw_job_id = job->index;
/* Follow the 64- or 32-bit next pointer per the descriptor-size flag. */
596 if (job->flags & JOB_FLAG_DESC_SIZE)
601 kbasep_unmap_page_sync(job, phys_addr);
604 return MALI_ERROR_NONE;
608 * @brief Reset the status of a number of jobs
610 * This function walks the provided job chain, and calls
611 * kbasep_replay_reset_job for each job. It also links the job chain to the
612 * provided previous job chain.
614 * The function will fail if any of the jobs passed already have status of
617 * @param[in] kctx Context pointer
618 * @param[in] jc Job chain to be processed
619 * @param[in] prev_jc Job chain to be added to. May be NULL
620 * @param[in] tiler_heap_free The value to reset Tiler Heap Free to
621 * @param[in] hierarchy_mask The hierarchy mask to use
622 * @param[in] default_weight Default hierarchy weight to write when no other
623 * weight is given in the FBD
624 * @param[in] hw_job_id_offset Offset for HW job IDs
625 * @param[in] fragment_chain MALI_TRUE if this chain is the fragment chain
627 * @return MALI_ERROR_NONE on success, error code otherwise
629 static mali_error kbasep_replay_parse_jc(kbase_context *kctx,
632 mali_addr64 tiler_heap_free,
635 u16 hw_job_id_offset,
636 mali_bool fragment_chain)
/* Only the first job in the chain gets the job-barrier flag set. */
638 mali_bool first_in_chain = MALI_TRUE;
641 KBASE_LOG(2, kctx->kbdev->dev,
642 "kbasep_replay_parse_jc: jc=%llx hw_job_id=%x\n",
643 jc, hw_job_id_offset);
646 KBASE_LOG(2, kctx->kbdev->dev,
647 "kbasep_replay_parse_jc: parsing jc=%llx\n",
/* kbasep_replay_reset_job advances jc to the next job header. */
650 if (kbasep_replay_reset_job(kctx, &jc, prev_jc,
651 tiler_heap_free, hierarchy_mask,
652 default_weight, hw_job_id_offset,
653 first_in_chain, fragment_chain) !=
655 return MALI_ERROR_FUNCTION_FAILED;
657 first_in_chain = MALI_FALSE;
/* Bound the fragment chain length to guard against malformed or
 * cyclic chains from userspace. */
660 if (fragment_chain &&
661 nr_jobs >= BASE_JD_REPLAY_F_CHAIN_JOB_LIMIT) {
662 dev_err(kctx->kbdev->dev,
663 "Exceeded maximum number of jobs in fragment chain\n");
664 return MALI_ERROR_FUNCTION_FAILED;
668 return MALI_ERROR_NONE;
672 * @brief Reset the status of a replay job, and set up dependencies
674 * This performs the actions to allow the replay job to be re-run following
675 * completion of the passed dependency.
677 * @param[in] katom The atom to be reset
678 * @param[in] dep_atom The dependency to be attached to the atom
680 static void kbasep_replay_reset_softjob(kbase_jd_atom *katom,
681 kbase_jd_atom *dep_atom)
/* Re-queue the soft job and make it depend on dep_atom (slot 0). */
683 katom->status = KBASE_JD_ATOM_STATE_QUEUED;
684 katom->dep_atom[0] = dep_atom;
685 list_add_tail(&katom->dep_item[0], &dep_atom->dep_head[0]);
689 * @brief Allocate an unused katom
691 * This will search the provided context for an unused katom, and will mark it
692 * as KBASE_JD_ATOM_STATE_QUEUED.
694 * If no atoms are available then the function will fail.
696 * @param[in] kctx Context pointer
697 * @return An atom ID, or -1 on failure
699 static int kbasep_allocate_katom(kbase_context *kctx)
701 kbase_jd_context *jctx = &kctx->jctx;
/* Search from the top of the atom table; atom 0 is never allocated. */
704 for (i = BASE_JD_ATOM_COUNT-1; i > 0; i--) {
705 if (jctx->atoms[i].status == KBASE_JD_ATOM_STATE_UNUSED) {
706 jctx->atoms[i].status = KBASE_JD_ATOM_STATE_QUEUED;
707 KBASE_LOG(2, kctx->kbdev->dev,
708 "kbasep_allocate_katom: Allocated atom %d\n",
718 * @brief Release a katom
720 * This will mark the provided atom as available, and remove any dependencies.
722 * For use on error path.
724 * @param[in] kctx Context pointer
725 * @param[in] atom_id ID of atom to release
727 static void kbasep_release_katom(kbase_context *kctx, int atom_id)
729 kbase_jd_context *jctx = &kctx->jctx;
731 KBASE_LOG(2, kctx->kbdev->dev,
732 "kbasep_release_katom: Released atom %d\n",
/* Detach anything still linked on either dependency list before the atom
 * is returned to the pool. */
735 while (!list_empty(&jctx->atoms[atom_id].dep_head[0]))
736 list_del(jctx->atoms[atom_id].dep_head[0].next);
737 while (!list_empty(&jctx->atoms[atom_id].dep_head[1]))
738 list_del(jctx->atoms[atom_id].dep_head[1].next);
740 jctx->atoms[atom_id].status = KBASE_JD_ATOM_STATE_UNUSED;
/* Initialise a base_jd_atom_v2 for use as a replay tiler/fragment atom:
 * no external resources, no dependencies, zeroed user data, and a priority
 * converted back from the kernel NICE range. */
743 static void kbasep_replay_create_atom(kbase_context *kctx,
744 base_jd_atom_v2 *atom,
749 atom->extres_list.value = NULL;
751 /* Convert priority back from NICE range */
752 atom->prio = ((prio << 16) / ((20 << 16) / 128)) - 128;
753 atom->atom_number = atom_nr;
755 atom->pre_dep[0] = 0;
756 atom->pre_dep[1] = 0;
758 atom->udata.blob[0] = 0;
759 atom->udata.blob[1] = 0;
763 * @brief Create two atoms for the purpose of replaying jobs
765 * Two atoms are allocated and created. The jc pointer is not set at this
766 * stage. The second atom has a dependency on the first. The remaining fields
767 * are set up as follows :
769 * - No external resources. Any required external resources will be held by the
771 * - device_nr is set to 0. This is not relevant as
772 * BASE_JD_REQ_SPECIFIC_COHERENT_GROUP should not be set.
773 * - Priority is inherited from the replay job.
775 * @param[out] t_atom Atom to use for tiler jobs
776 * @param[out] f_atom Atom to use for fragment jobs
777 * @param[in] prio Priority of new atom (inherited from replay soft
779 * @return MALI_ERROR_NONE on success, error code on failure
781 static mali_error kbasep_replay_create_atoms(kbase_context *kctx,
782 base_jd_atom_v2 *t_atom,
783 base_jd_atom_v2 *f_atom,
786 int t_atom_nr, f_atom_nr;
788 t_atom_nr = kbasep_allocate_katom(kctx);
790 dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
791 return MALI_ERROR_FUNCTION_FAILED;
794 f_atom_nr = kbasep_allocate_katom(kctx);
796 dev_err(kctx->kbdev->dev, "Failed to allocate katom\n");
/* Release the tiler atom so no atom leaks on the error path. */
797 kbasep_release_katom(kctx, t_atom_nr);
798 return MALI_ERROR_FUNCTION_FAILED;
801 kbasep_replay_create_atom(kctx, t_atom, t_atom_nr, prio);
802 kbasep_replay_create_atom(kctx, f_atom, f_atom_nr, prio);
/* Fragment atom must wait for the tiler atom to complete. */
804 f_atom->pre_dep[0] = t_atom_nr;
806 return MALI_ERROR_NONE;
809 #ifdef CONFIG_MALI_DEBUG
/* Debug helper: walk the payload's tiler job-chain list and log each entry.
 * Each list node is mapped, logged, and unmapped in turn. */
810 static void payload_dump(kbase_context *kctx, base_jd_replay_payload *payload)
814 KBASE_LOG(2, kctx->kbdev->dev, "Tiler jc list :\n");
815 next = payload->tiler_jc_list;
818 base_jd_replay_jc *jc_struct = kbasep_map_page(kctx, next, NULL);
823 KBASE_LOG(2, kctx->kbdev->dev,
824 "* jc_struct=%p jc=%llx next=%llx\n",
828 next = jc_struct->next;
830 kbasep_unmap_page(jc_struct);
836 * @brief Parse a base_jd_replay_payload provided by userspace
838 * This will read the payload from userspace, and parse the job chains.
840 * @param[in] kctx Context pointer
841 * @param[in] replay_atom Replay soft job atom
842 * @param[in] t_atom Atom to use for tiler jobs
843 * @param[in] f_atom Atom to use for fragment jobs
844 * @return MALI_ERROR_NONE on success, error code on failure
846 static mali_error kbasep_replay_parse_payload(kbase_context *kctx,
847 kbase_jd_atom *replay_atom,
848 base_jd_atom_v2 *t_atom,
849 base_jd_atom_v2 *f_atom)
851 base_jd_replay_payload *payload;
853 mali_addr64 prev_jc = 0;
854 u16 hw_job_id_offset = 0;
/* Default to failure; flipped to MALI_ERROR_NONE only on full success. */
855 mali_error ret = MALI_ERROR_FUNCTION_FAILED;
857 struct device *dev = kctx->kbdev->dev;
860 "kbasep_replay_parse_payload: replay_atom->jc = %llx "
861 "sizeof(payload) = %d\n",
862 replay_atom->jc, sizeof(payload));
/* The VM lock is held across the whole parse; all page mappings below
 * happen under it. */
864 kbase_gpu_vm_lock(kctx);
866 payload = kbasep_map_page_sync(kctx, replay_atom->jc, &phys_addr);
869 kbase_gpu_vm_unlock(kctx);
870 dev_err(dev, "kbasep_replay_parse_payload: failed to map payload into kernel space\n");
871 return MALI_ERROR_FUNCTION_FAILED;
874 #ifdef CONFIG_MALI_DEBUG
875 KBASE_LOG(2, dev, "kbasep_replay_parse_payload: payload=%p\n", payload);
876 KBASE_LOG(2, dev, "Payload structure:\n"
877 "tiler_jc_list = %llx\n"
878 "fragment_jc = %llx\n"
879 "tiler_heap_free = %llx\n"
880 "fragment_hierarchy_mask = %x\n"
881 "tiler_hierarchy_mask = %x\n"
882 "hierarchy_default_weight = %x\n"
883 "tiler_core_req = %x\n"
884 "fragment_core_req = %x\n",
885 payload->tiler_jc_list,
886 payload->fragment_jc,
887 payload->tiler_heap_free,
888 payload->fragment_hierarchy_mask,
889 payload->tiler_hierarchy_mask,
890 payload->hierarchy_default_weight,
891 payload->tiler_core_req,
892 payload->fragment_core_req);
893 payload_dump(kctx, payload);
/* Suppress completion events from the replacement atoms. */
896 t_atom->core_req = payload->tiler_core_req | BASEP_JD_REQ_EVENT_NEVER;
897 f_atom->core_req = payload->fragment_core_req | BASEP_JD_REQ_EVENT_NEVER;
899 /* Sanity check core requirements*/
/* The tiler atom must be a T job and the fragment atom an FS job, and
 * neither may use external resources — untrusted payload, so validate. */
900 if ((t_atom->core_req & BASEP_JD_REQ_ATOM_TYPE &
901 ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_T ||
902 (f_atom->core_req & BASEP_JD_REQ_ATOM_TYPE &
903 ~BASE_JD_REQ_COHERENT_GROUP) != BASE_JD_REQ_FS ||
904 t_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES ||
905 f_atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
906 dev_err(dev, "Invalid core requirements\n");
910 /* Process tiler job chains */
911 next = payload->tiler_jc_list;
913 dev_err(dev, "Invalid tiler JC list\n");
/* Walk the list of tiler job chains supplied by userspace. */
918 base_jd_replay_jc *jc_struct = kbasep_map_page(kctx, next, NULL);
922 dev_err(dev, "Failed to map jc struct\n");
927 next = jc_struct->next;
931 kbasep_unmap_page(jc_struct);
934 u16 max_hw_job_id = 0;
936 if (kbasep_replay_find_hw_job_id(kctx, jc,
937 &max_hw_job_id) != MALI_ERROR_NONE)
940 if (kbasep_replay_parse_jc(kctx, jc, prev_jc,
941 payload->tiler_heap_free,
942 payload->tiler_hierarchy_mask,
943 payload->hierarchy_default_weight,
944 hw_job_id_offset, MALI_FALSE) !=
/* Offset subsequent chains' IDs past this chain's highest ID. */
949 hw_job_id_offset += max_hw_job_id;
954 t_atom->jc = prev_jc;
956 /* Process fragment job chain */
957 f_atom->jc = payload->fragment_jc;
958 if (kbasep_replay_parse_jc(kctx, payload->fragment_jc, 0,
959 payload->tiler_heap_free,
960 payload->fragment_hierarchy_mask,
961 payload->hierarchy_default_weight, 0,
962 MALI_TRUE) != MALI_ERROR_NONE) {
966 if (!t_atom->jc || !f_atom->jc) {
967 dev_err(dev, "Invalid payload\n");
971 KBASE_LOG(2, dev, "t_atom->jc=%llx f_atom->jc=%llx\n",
972 t_atom->jc, f_atom->jc);
973 ret = MALI_ERROR_NONE;
/* Common exit: write back/unmap the payload and drop the VM lock. */
976 kbasep_unmap_page_sync(payload, phys_addr);
978 kbase_gpu_vm_unlock(kctx);
984 * @brief Process a replay job
986 * Called from kbase_process_soft_job.
988 * On exit, if the job has completed, katom->event_code will have been updated.
989 * If the job has not completed, and is replaying jobs, then the atom status
990 * will have been reset to KBASE_JD_ATOM_STATE_QUEUED.
992 * @param[in] katom The atom to be processed
993 * @return MALI_REPLAY_STATUS_COMPLETE if the atom has completed
994 * MALI_REPLAY_STATUS_REPLAYING if the atom is replaying jobs
995 * Set MALI_REPLAY_FLAG_JS_RESCHED if
996 * kbasep_js_try_schedule_head_ctx required
998 int kbase_replay_process(kbase_jd_atom *katom)
1000 kbase_context *kctx = katom->kctx;
1001 kbase_jd_context *jctx = &kctx->jctx;
1002 mali_bool need_to_try_schedule_context = MALI_FALSE;
1003 base_jd_atom_v2 t_atom, f_atom;
1004 kbase_jd_atom *t_katom, *f_katom;
1005 struct device *dev = kctx->kbdev->dev;
/* Nothing to replay if the preceding job chain succeeded. */
1007 if (katom->event_code == BASE_JD_EVENT_DONE) {
1008 KBASE_LOG(2, dev, "Previous job succeeded - not replaying\n");
1009 return MALI_REPLAY_STATUS_COMPLETE;
/* Don't start a replay on a context that is being torn down. */
1012 if (jctx->sched_info.ctx.is_dying) {
1013 KBASE_LOG(2, dev, "Not replaying; context is dying\n");
1014 return MALI_REPLAY_STATUS_COMPLETE;
1017 dev_warn(dev, "Replaying jobs retry=%d\n", katom->retry_count);
/* Cap the number of replay attempts; on overflow, let the original
 * failure propagate. */
1019 katom->retry_count++;
1020 if (katom->retry_count > BASEP_JD_REPLAY_LIMIT) {
1021 dev_err(dev, "Replay exceeded limit - failing jobs\n");
1022 /* katom->event_code is already set to the failure code of the
1024 return MALI_REPLAY_STATUS_COMPLETE;
/* Allocate and initialise the replacement tiler/fragment atoms. */
1027 if (kbasep_replay_create_atoms(kctx, &t_atom, &f_atom,
1028 katom->nice_prio) != MALI_ERROR_NONE) {
1029 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1030 return MALI_REPLAY_STATUS_COMPLETE;
1033 t_katom = &jctx->atoms[t_atom.atom_number];
1034 f_katom = &jctx->atoms[f_atom.atom_number];
/* Parse the userspace payload; release both atoms on failure. */
1036 if (kbasep_replay_parse_payload(kctx, katom, &t_atom, &f_atom) !=
1038 kbasep_release_katom(kctx, t_atom.atom_number);
1039 kbasep_release_katom(kctx, f_atom.atom_number);
1040 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1041 return MALI_REPLAY_STATUS_COMPLETE;
/* Re-queue the replay soft job behind the fragment atom. */
1044 kbasep_replay_reset_softjob(katom, f_katom);
1046 need_to_try_schedule_context |= jd_submit_atom(kctx, &t_atom, t_katom);
1047 if (t_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
1048 dev_err(dev, "Replay failed to submit atom\n");
/* NOTE(review): only the fragment atom is released here — presumably
 * jd_submit_atom owns/cleans up the invalid tiler atom; confirm. */
1049 kbasep_release_katom(kctx, f_atom.atom_number);
1050 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1051 katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
1052 return MALI_REPLAY_STATUS_COMPLETE;
1054 need_to_try_schedule_context |= jd_submit_atom(kctx, &f_atom, f_katom);
1055 if (f_katom->event_code == BASE_JD_EVENT_JOB_INVALID) {
1056 dev_err(dev, "Replay failed to submit atom\n");
1057 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
1058 katom->status = KBASE_JD_ATOM_STATE_COMPLETED;
1059 return MALI_REPLAY_STATUS_COMPLETE;
1062 katom->event_code = BASE_JD_EVENT_DONE;
/* Report whether the caller must kick the job scheduler. */
1064 if (need_to_try_schedule_context)
1065 return MALI_REPLAY_STATUS_REPLAYING |
1066 MALI_REPLAY_FLAG_JS_RESCHED;
1067 return MALI_REPLAY_STATUS_REPLAYING;