/*
 * (C) COPYRIGHT ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 */

/**
 * @file mali_kbase_defs.h
 *
 * Definitions (types, defines, etc.) common to Kbase. They are placed here to
 * allow the hierarchy of header files to work.
 */
#ifndef _KBASE_DEFS_H_
#define _KBASE_DEFS_H_

#include <mali_kbase_config.h>
#include <mali_base_hwconfig.h>
#include <mali_kbase_mem_lowlevel.h>
#include <mali_kbase_mem_alloc.h>

#include <linux/atomic.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#ifdef CONFIG_KDS
#include <linux/kds.h>
#endif /* CONFIG_KDS */
#ifdef CONFIG_SYNC
#include "sync.h"
#endif /* CONFIG_SYNC */
/** Enable SW tracing when set */
#ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
#define KBASE_TRACE_ENABLE 1
#endif

#ifndef KBASE_TRACE_ENABLE
#ifdef CONFIG_MALI_DEBUG
#define KBASE_TRACE_ENABLE 1
#else
#define KBASE_TRACE_ENABLE 0
#endif /* CONFIG_MALI_DEBUG */
#endif /* KBASE_TRACE_ENABLE */
/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
/**
 * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
 * Note that the actual time elapsed between the context zap starting and the GPU being reset is
 * ZAP_TIMEOUT + SOFT_STOP_RESET_TIMEOUT, to give other contexts time for their jobs to be
 * soft-stopped and removed from the hardware before the reset starts.
 */
#define ZAP_TIMEOUT 1000
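/*
 * Worked example (illustrative only, not part of the driver): with
 * ZAP_TIMEOUT == 1000 and a hypothetical SOFT_STOP_RESET_TIMEOUT of 3000 ms
 * configured elsewhere, a zap requested at t=0 could not trigger a GPU reset
 * before t=4000 ms:
 *
 * @code
 * u32 worst_case_ms = ZAP_TIMEOUT + SOFT_STOP_RESET_TIMEOUT; // 1000 + 3000
 * @endcode
 */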
/** Number of milliseconds before we time out on a GPU soft/hard reset */
#define RESET_TIMEOUT 500
/**
 * Prevent soft-stops from occurring in scheduling situations
 *
 * This is not due to HW issues, but when scheduling is desired to be more predictable.
 *
 * Therefore, soft stop may still be disabled due to HW issues.
 *
 * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
 *
 * @note If not in use, define this value to 0 instead of \#undef'ing it
 */
#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
/**
 * Prevent hard-stops from occurring in scheduling situations
 *
 * This is not due to HW issues, but when scheduling is desired to be more predictable.
 *
 * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
 *
 * @note If not in use, define this value to 0 instead of \#undef'ing it
 */
#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
/* Forward declarations + definitions */
typedef struct kbase_context kbase_context;
typedef struct kbase_jd_atom kbasep_jd_atom;
typedef struct kbase_device kbase_device;
/**
 * The maximum number of Job Slots to support in the Hardware.
 *
 * You can optimize this down if your target devices will only ever support a
 * small number of job slots.
 */
#define BASE_JM_MAX_NR_SLOTS 16
/**
 * The maximum number of Address Spaces to support in the Hardware.
 *
 * You can optimize this down if your target devices will only ever support a
 * small number of Address Spaces.
 */
#define BASE_MAX_NR_AS 16
#define ENTRY_IS_ATE 1ULL
#define ENTRY_IS_INVAL 2ULL
#define ENTRY_IS_PTE 3ULL

#define MIDGARD_MMU_VA_BITS 48

#define ENTRY_ATTR_BITS (7ULL << 2)	/* bits 4:2 */
#define ENTRY_RD_BIT (1ULL << 6)
#define ENTRY_WR_BIT (1ULL << 7)
#define ENTRY_SHARE_BITS (3ULL << 8)	/* bits 9:8 */
#define ENTRY_ACCESS_BIT (1ULL << 10)
#define ENTRY_NX_BIT (1ULL << 54)

#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
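/*
 * Illustrative sketch (hypothetical helper, not a driver function): how a
 * level-3 Address Translation Entry could be composed from a 4kB-aligned
 * physical address plus the flag bits above. The low two bits carry the
 * entry type (ENTRY_IS_ATE here).
 */
static INLINE u64 example_make_ate(u64 phys_4kb_aligned)
{
	/* Clear the flag and type fields, then mark the entry readable,
	 * writable and accessed, and tag it as an ATE. */
	return (phys_4kb_aligned & ~ENTRY_FLAGS_MASK & ~3ULL) |
			ENTRY_RD_BIT | ENTRY_WR_BIT | ENTRY_ACCESS_BIT | ENTRY_IS_ATE;
}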
#if MIDGARD_MMU_VA_BITS > 39
#define MIDGARD_MMU_TOPLEVEL 0
#else
#define MIDGARD_MMU_TOPLEVEL 1
#endif
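/*
 * Illustrative sketch: each page table level resolves 9 VA bits above the
 * 12-bit page offset, so a 48-bit VA needs levels 0..3 (12 + 4*9 = 48),
 * while a 39-bit VA space can start at level 1 (12 + 3*9 = 39), which is
 * what MIDGARD_MMU_TOPLEVEL selects above. Hypothetical helper, not a
 * driver function; valid for levels 0..3 only:
 */
static INLINE unsigned int example_mmu_level_index(u64 va, unsigned int level)
{
	/* Level 3 indexes VA bits 20:12, level 2 bits 29:21, and so on. */
	return (unsigned int)(va >> (12 + 9 * (3 - level))) & 0x1FF;
}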
#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW)
#define GROWABLE_FLAGS_MASK (GROWABLE_FLAGS_REQUIRED | KBASE_REG_FREE)

/** setting in kbase_context::as_nr that indicates it's invalid */
#define KBASEP_AS_NR_INVALID (-1)

#define KBASE_LOCK_REGION_MAX_SIZE (63)
#define KBASE_LOCK_REGION_MIN_SIZE (11)
#define KBASE_TRACE_SIZE_LOG2 8	/* 256 entries */
#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
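/*
 * Because KBASE_TRACE_SIZE is a power of two, a trace ring-buffer index can
 * wrap with a cheap AND against KBASE_TRACE_MASK instead of a modulo.
 * Hypothetical helper, for illustration only:
 */
static INLINE u32 example_trace_next_index(u32 current_index)
{
	return (current_index + 1) & KBASE_TRACE_MASK;
}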
#include "mali_kbase_js_defs.h"

#define KBASEP_FORCE_REPLAY_DISABLED 0

/* Maximum force replay limit when randomization is enabled */
#define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
/**
 * @brief States of the state machine processed by kbasep_js_job_check_ref_cores(), which
 * handles retaining cores for power management and affinity management.
 *
 * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
 * where lots of atoms could be submitted before powerup, and each has an
 * affinity chosen that causes other atoms to have an affinity
 * violation. Whilst the affinity was not causing violations at the time it
 * was chosen, it could cause violations thereafter. For example, 1000 jobs
 * could have had their affinity chosen during the powerup time, so any of
 * those 1000 jobs could cause an affinity violation later on.
 *
 * The attack would otherwise occur because other atoms/contexts have to wait for:
 * -# the currently running atoms (which are causing the violation) to
 *    finish,
 * -# and, the atoms that had their affinity chosen during powerup to
 *    finish. These are run preferentially because they don't cause a
 *    violation, but instead continue to cause the violation in others.
 * -# or, the attacker is scheduled out (which might not happen for just 2
 *    contexts).
 *
 * By re-choosing the affinity (which is designed to avoid violations at the
 * time it's chosen), we break condition (2) of the wait, which minimizes the
 * problem to just waiting for current jobs to finish (which can be bounded if
 * the Job Scheduling Policy has a timer).
 */
typedef enum {
	/** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
	KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
	/** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
	KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
	/** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
	KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
	/** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
	KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
	/** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
	KBASE_ATOM_COREREF_STATE_READY
} kbase_atom_coreref_state;
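/*
 * Illustrative sketch (hypothetical helper) of the forward progression
 * described above for an atom that encounters no affinity violation; the
 * real transitions are performed by kbasep_js_job_check_ref_cores().
 */
static INLINE kbase_atom_coreref_state example_coreref_next(kbase_atom_coreref_state state)
{
	switch (state) {
	case KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED:
		return KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES;
	case KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES:
		return KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY;
	case KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY:
		return KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS;
	default:
		return KBASE_ATOM_COREREF_STATE_READY;
	}
}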
typedef enum {
	/** Atom is not used */
	KBASE_JD_ATOM_STATE_UNUSED,
	/** Atom is queued in JD */
	KBASE_JD_ATOM_STATE_QUEUED,
	/** Atom has been given to JS (is runnable/running) */
	KBASE_JD_ATOM_STATE_IN_JS,
	/** Atom has been completed, but not yet handed back to userspace */
	KBASE_JD_ATOM_STATE_COMPLETED
} kbase_jd_atom_state;
/** Atom has previously been soft-stopped */
#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
/** Atom has previously been retried for execution */
#define KBASE_KATOM_FLAGS_RERUN (1<<2)
#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
typedef struct kbase_jd_atom kbase_jd_atom;
struct kbase_jd_atom_dependency
{
	struct kbase_jd_atom *atom;
	u8 dep_type;
};
/**
 * @brief Retrieves a read-only reference to the atom field from
 * the kbase_jd_atom_dependency structure.
 *
 * @param[in] dep kbase jd atom dependency.
 *
 * @return Read-only reference to the dependent atom.
 */
static INLINE const struct kbase_jd_atom* const kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency* dep)
{
	LOCAL_ASSERT(dep != NULL);

	return (const struct kbase_jd_atom* const)(dep->atom);
}
/**
 * @brief Retrieves the dependency type field from
 * the kbase_jd_atom_dependency structure.
 *
 * @param[in] dep kbase jd atom dependency.
 *
 * @return The dependency type value.
 */
static INLINE u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency* dep)
{
	LOCAL_ASSERT(dep != NULL);

	return dep->dep_type;
}
/**
 * @brief Setter for a dep_atom array entry in kbase_jd_atom.
 *
 * @param[in] const_dep The kbase jd atom dependency.
 * @param[in] a The atom to be set as a dependency.
 * @param type The dependency type to be set.
 */
static INLINE void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency* const_dep,
		struct kbase_jd_atom *a,
		u8 type)
{
	struct kbase_jd_atom_dependency* dep;

	LOCAL_ASSERT(const_dep != NULL);

	dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency*)const_dep);

	dep->atom = a;
	dep->dep_type = type;
}
/**
 * @brief Clears a dep_atom array entry in kbase_jd_atom.
 *
 * @param[in] const_dep The kbase jd atom dependency to be cleared.
 */
static INLINE void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency* const_dep)
{
	struct kbase_jd_atom_dependency* dep;

	LOCAL_ASSERT(const_dep != NULL);

	dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency*)const_dep);

	dep->atom = NULL;
	dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
}
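/*
 * Usage sketch for the dependency accessors above (illustrative only;
 * katom and dep_atom are hypothetical locals, and BASE_JD_DEP_TYPE_DATA is
 * assumed to be one of the dependency types from the base headers):
 *
 * @code
 * kbase_jd_katom_dep_set(&katom->dep[0], dep_atom, BASE_JD_DEP_TYPE_DATA);
 * if (kbase_jd_katom_dep_atom(&katom->dep[0]) == dep_atom)
 *         ;	// dependency recorded
 * kbase_jd_katom_dep_clear(&katom->dep[0]);
 * @endcode
 */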
struct kbase_ext_res {
	mali_addr64 gpu_address;
	struct kbase_mem_phy_alloc *alloc;
};
struct kbase_jd_atom {
	struct work_struct work;
	ktime_t start_timestamp;

	struct list_head dep_head[2];
	struct list_head dep_item[2];
	const struct kbase_jd_atom_dependency dep[2];

	struct kbase_ext_res *extres;

	kbase_atom_coreref_state coreref_state;
#ifdef CONFIG_KDS
	struct list_head node;
	struct kds_resource_set *kds_rset;
	mali_bool kds_dep_satisfied;
#endif /* CONFIG_KDS */
#ifdef CONFIG_SYNC
	struct sync_fence *fence;
	struct sync_fence_waiter sync_waiter;
#endif /* CONFIG_SYNC */
	/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
	base_jd_event_code event_code;
	base_jd_core_req core_req;	/**< core requirements */
	/** Job Slot to retry submitting to if submission from IRQ handler failed
	 *
	 * NOTE: see if this can be unified into another member e.g. the event */
	int retry_submit_on_slot;

	kbasep_js_policy_job_info sched_info;
	/* atom priority scaled to nice range with +20 offset 0..39 */

	int poking;		/* BASE_HW_ISSUE_8316 */

	wait_queue_head_t completed;
	kbase_jd_atom_state status;
#ifdef CONFIG_GPU_TRACEPOINTS
	int work_id;
#endif
	/* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */

	/* Number of times this atom has been retried. Used by replay soft job. */
	int retry_count;
};
/*
 * Theory of operations:
 *
 * Atom objects are statically allocated within the context structure.
 *
 * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
 */

#define KBASE_JD_DEP_QUEUE_SIZE 256
typedef struct kbase_jd_context {
	struct mutex lock;
	kbasep_js_kctx_info sched_info;
	kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];

	/** Tracks all job-dispatch jobs. This includes those not tracked by
	 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
	u32 job_nr;
	/** Waitq that reflects whether there are no jobs (including SW-only
	 * dependency jobs). This is set when no jobs are present on the ctx,
	 * and clear when there are jobs.
	 *
	 * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
	 * the Job Scheduler is unaware of jobs that are blocked on dependencies,
	 * and SW-only dependency jobs.
	 *
	 * This waitq can be waited upon to find out when the context jobs are all
	 * done/cancelled (including those that might've been blocked on
	 * dependencies) - and so, whether it can be terminated. However, it should
	 * only be terminated once it is neither present in the policy-queue (see
	 * kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see
	 * kbasep_js_kctx_info::ctx::is_scheduled).
	 *
	 * Since the waitq is only set under kbase_jd_context::lock,
	 * the waiter should also briefly obtain and drop kbase_jd_context::lock to
	 * guarantee that the setter has completed its work on the kbase_context.
	 *
	 * This must be updated atomically with:
	 * - kbase_jd_context::job_nr */
	wait_queue_head_t zero_jobs_wait;
	/** Job Done workqueue. */
	struct workqueue_struct *job_done_wq;

	size_t tb_wrap_offset;

#ifdef CONFIG_KDS
	struct kds_callback kds_cb;
#endif /* CONFIG_KDS */
#ifdef CONFIG_GPU_TRACEPOINTS
	atomic_t work_id;
#endif
} kbase_jd_context;
typedef struct kbase_jm_slot {
	/* The number of slots must be a power of two */
#define BASE_JM_SUBMIT_SLOTS 16
#define BASE_JM_SUBMIT_SLOTS_MASK (BASE_JM_SUBMIT_SLOTS - 1)

	struct kbase_jd_atom *submitted[BASE_JM_SUBMIT_SLOTS];

	kbase_context *last_context;

	u8 submitted_head;
	u8 submitted_nr;
} kbase_jm_slot;
typedef enum kbase_midgard_type {
	KBASE_MALI_T601,
	KBASE_MALI_T604,
	KBASE_MALI_T608,
	KBASE_MALI_COUNT
} kbase_midgard_type;
typedef struct kbase_device_info {
	kbase_midgard_type dev_type;
	u32 features;
} kbase_device_info;
/** Poking state for BASE_HW_ISSUE_8316 */
enum {
	KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
	KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
};

/** Poking state for BASE_HW_ISSUE_8316 */
typedef u32 kbase_as_poke_state;
/**
 * Important: Our code makes the assumption that a kbase_as structure is always at
 * kbase_device->as[number]. This is used to recover the containing
 * kbase_device from a kbase_as structure.
 *
 * Therefore, kbase_as structures must not be allocated anywhere else.
 */
typedef struct kbase_as {
	int number;

	struct workqueue_struct *pf_wq;
	struct work_struct work_pagefault;
	struct work_struct work_busfault;
	mali_addr64 fault_addr;

	struct mutex transaction_mutex;

	/* BASE_HW_ISSUE_8316 */
	struct workqueue_struct *poke_wq;
	struct work_struct poke_work;
	/** Protected by kbasep_js_device_data::runpool_irq::lock */
	int poke_refcount;
	/** Protected by kbasep_js_device_data::runpool_irq::lock */
	kbase_as_poke_state poke_state;
	struct hrtimer poke_timer;
} kbase_as;
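/*
 * Illustrative sketch of the recovery described above (hypothetical helper;
 * it relies on the invariant that every kbase_as lives at
 * kbase_device->as[number], and can only be compiled once struct
 * kbase_device is defined):
 *
 * @code
 * static INLINE kbase_device *example_as_to_kbdev(kbase_as *as)
 * {
 *         return (kbase_device *)((char *)as
 *                         - as->number * sizeof(kbase_as)
 *                         - offsetof(kbase_device, as));
 * }
 * @endcode
 */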
/**
 * Instrumentation State Machine States
 */
typedef enum {
	/** State where instrumentation is not active */
	KBASE_INSTR_STATE_DISABLED = 0,
	/** State machine is active and ready for a command. */
	KBASE_INSTR_STATE_IDLE,
	/** Hardware is currently dumping a frame. */
	KBASE_INSTR_STATE_DUMPING,
	/** We've requested a clean to occur on a workqueue */
	KBASE_INSTR_STATE_REQUEST_CLEAN,
	/** Hardware is currently cleaning and invalidating caches. */
	KBASE_INSTR_STATE_CLEANING,
	/** Cache clean completed, and either a) a dump is complete, or
	 * b) instrumentation can now be set up. */
	KBASE_INSTR_STATE_CLEANED,
	/** kbasep_reset_timeout_worker() has started (but not completed) a
	 * reset. This generally indicates the current action should be aborted, and
	 * kbasep_reset_timeout_worker() will handle the cleanup */
	KBASE_INSTR_STATE_RESETTING,
	/** An error has occurred during DUMPING (page fault). */
	KBASE_INSTR_STATE_FAULT
} kbase_instr_state;
typedef struct kbasep_mem_device {
	atomic_t used_pages;	/* Tracks usage of OS shared memory. Updated
				   when OS memory is allocated/freed. */
} kbasep_mem_device;

#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
typedef enum {
	/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
	 * THIS MUST BE USED AT THE START OF THE ENUM */
#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
#include "mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
	/* Comma on its own, to extend the list */
	,
	/* Must be the last in the enum */
	KBASE_TRACE_CODE_COUNT
} kbase_trace_code;
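/*
 * Illustrative sketch of the X-macro technique used above: each entry in
 * mali_kbase_trace_defs.h is written as KBASE_TRACE_CODE_MAKE_CODE(NAME),
 * so including that file while the macro is defined emits one
 * KBASE_TRACE_CODE_NAME enumerator per entry (the entry name below is
 * hypothetical):
 *
 * @code
 * // In mali_kbase_trace_defs.h:
 * KBASE_TRACE_CODE_MAKE_CODE(CORE_GPU_SOFT_RESET),
 * // Expands inside the enum above to:
 * KBASE_TRACE_CODE_CORE_GPU_SOFT_RESET,
 * @endcode
 */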
#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)

typedef struct kbase_trace {
	struct timespec timestamp;

	unsigned long info_val;
	u8 code;
	u8 jobslot;
	u8 refcount;
	u8 flags;
} kbase_trace;
/** Event IDs for the power management framework.
 *
 * Any of these events might be missed, so they should not be relied upon to
 * find the precise state of the GPU at a particular time in the
 * trace. Overall, we should get a high percentage of these events for
 * statistical purposes, and so a few missing should not be a problem */
typedef enum kbase_timeline_pm_event {
	/* helper for tests */
	KBASEP_TIMELINE_PM_EVENT_FIRST,

	/** Event reserved for backwards compatibility with 'init' events */
	KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,

	/** The power state of the device has changed.
	 *
	 * Specifically, the device has reached a desired or available state.
	 */
	KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,

	/** The GPU is becoming active.
	 *
	 * This event is sent when the first context is about to use the GPU.
	 */
	KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,

	/** The GPU is becoming idle.
	 *
	 * This event is sent when the last context has finished using the GPU.
	 */
	KBASE_TIMELINE_PM_EVENT_GPU_IDLE,

	/** Event reserved for backwards compatibility with 'policy_change'
	 * events */
	KBASE_TIMELINE_PM_EVENT_RESERVED_4,

	/** Event reserved for backwards compatibility with 'system_suspend'
	 * events */
	KBASE_TIMELINE_PM_EVENT_RESERVED_5,

	/** Event reserved for backwards compatibility with 'system_resume'
	 * events */
	KBASE_TIMELINE_PM_EVENT_RESERVED_6,

	/** The job scheduler is requesting to power up/down cores.
	 *
	 * This event is sent when:
	 * - powered down cores are needed to complete a job
	 * - powered up cores are not needed anymore
	 */
	KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,

	KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
} kbase_timeline_pm_event;
#ifdef CONFIG_MALI_TRACE_TIMELINE
typedef struct kbase_trace_kctx_timeline {
	atomic_t jd_atoms_in_flight;
} kbase_trace_kctx_timeline;

typedef struct kbase_trace_kbdev_timeline {
	struct dentry *dentry;
	/* Note: strictly speaking, not needed, because it's in sync with
	 * kbase_device::jm_slots[]::submitted_nr
	 *
	 * But it's kept as an example of how to add global timeline tracking
	 * information.
	 *
	 * The caller must hold kbasep_js_device_data::runpool_irq::lock when
	 * accessing this */
	u8 slot_atoms_submitted[BASE_JM_SUBMIT_SLOTS];
	/* Last UID for each PM event */
	atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
	/* Counter for generating PM event UIDs */
	atomic_t pm_event_uid_counter;

	/*
	 * L2 transition state - MALI_TRUE indicates that the transition is ongoing.
	 * Expected to be protected by pm.power_change_lock */
	mali_bool l2_transitioning;
} kbase_trace_kbdev_timeline;
#endif /* CONFIG_MALI_TRACE_TIMELINE */
typedef struct kbasep_kctx_list_element {
	struct list_head link;
	kbase_context *kctx;
} kbasep_kctx_list_element;

#define DEVNAME_SIZE 16
struct kbase_device {
	/** jm_slots is protected by kbasep_js_device_data::runpool_irq::lock */
	kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
	s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];

	struct list_head entry;

	struct miscdevice mdev;

	struct resource *reg_res;

	char devname[DEVNAME_SIZE];

#ifdef CONFIG_MALI_NO_MALI
	struct kmem_cache *irq_slab;
	struct workqueue_struct *irq_workq;
	atomic_t serving_job_irq;
	atomic_t serving_gpu_irq;
	atomic_t serving_mmu_irq;
	spinlock_t reg_op_lock;
#endif /* CONFIG_MALI_NO_MALI */

	kbase_pm_device_data pm;
	kbasep_js_device_data js_data;
	kbasep_mem_device memdev;

	kbase_as as[BASE_MAX_NR_AS];

	spinlock_t mmu_mask_change;

	kbase_gpu_props gpu_props;

	/** List of SW workarounds for HW issues */
	unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
	/** List of features available */
	unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];

	/* Cached present bitmaps - these are the same as the corresponding hardware registers */
	u64 shader_present_bitmap;
	u64 tiler_present_bitmap;
	u64 l2_present_bitmap;
	u64 l3_present_bitmap;
	/* Bitmaps of cores that are currently in use (running jobs).
	 * These should be kept up to date by the job scheduler.
	 *
	 * pm.power_change_lock should be held when accessing these members.
	 *
	 * kbase_pm_check_transitions_nolock() should be called when bits are
	 * cleared to update the power management system and allow transitions to
	 * occur. */
	u64 shader_inuse_bitmap;
	/* Refcount for cores in use */
	u32 shader_inuse_cnt[64];

	/* Bitmaps of cores the JS needs for jobs ready to run */
	u64 shader_needed_bitmap;

	/* Refcount for cores needed */
	u32 shader_needed_cnt[64];

	u32 tiler_needed_cnt;

	/* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
	/* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs
	 * to be submitted to these cores). These are updated by the power management code. The job scheduler
	 * should avoid submitting new jobs to any cores that are not marked as available.
	 *
	 * pm.power_change_lock should be held when accessing these members.
	 */
	u64 shader_available_bitmap;
	u64 tiler_available_bitmap;
	u64 l2_available_bitmap;

	u64 shader_ready_bitmap;
	u64 shader_transitioning_bitmap;
	s8 nr_hw_address_spaces;	/**< Number of address spaces in the GPU (constant after driver initialisation) */
	s8 nr_user_address_spaces;	/**< Number of address spaces available to user contexts */

	/* Structure used for instrumentation and HW counters dumping */

	/* The lock should be used when accessing any of the following members */

	wait_queue_head_t wait;

	kbase_instr_state state;
	wait_queue_head_t cache_clean_wait;
	struct workqueue_struct *cache_clean_wq;
	struct work_struct cache_clean_work;

	kbase_context *suspended_kctx;
	kbase_uk_hwcnt_setup suspended_state;
	/* Set when we're about to reset the GPU */
	atomic_t reset_gpu;
#define KBASE_RESET_GPU_NOT_PENDING 0	/* The GPU reset isn't pending */
#define KBASE_RESET_GPU_PREPARED 1	/* kbase_prepare_to_reset_gpu has been called */
#define KBASE_RESET_GPU_COMMITTED 2	/* kbase_reset_gpu has been called - the reset will now definitely happen
					 * within the timeout period */
#define KBASE_RESET_GPU_HAPPENING 3	/* The GPU reset process is currently occurring (timeout has expired or
					 * kbasep_try_reset_gpu_early was called) */
	/* Work queue and work item for performing the reset */
	struct workqueue_struct *reset_workq;
	struct work_struct reset_work;
	wait_queue_head_t reset_wait;
	struct hrtimer reset_timer;

	/* Value to be written to the irq_throttle register each time an IRQ is served */
	atomic_t irq_throttle_cycles;

	const kbase_attribute *config_attributes;
#if KBASE_TRACE_ENABLE != 0
	spinlock_t trace_lock;

	kbase_trace *trace_rbuf;
#endif
#if MALI_CUSTOMER_RELEASE == 0
	/* This is used to override the current job scheduler values for
	 * KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS
	 * KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS_CL
	 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
	 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL
	 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
	 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
	 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL
	 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
	 *
	 * These values are set via the js_timeouts sysfs file.
	 */
	u32 js_soft_stop_ticks;
	u32 js_soft_stop_ticks_cl;
	u32 js_hard_stop_ticks_ss;
	u32 js_hard_stop_ticks_cl;
	u32 js_hard_stop_ticks_nss;
	u32 js_reset_ticks_ss;
	u32 js_reset_ticks_cl;
	u32 js_reset_ticks_nss;
#endif
	struct mutex cacheclean_lock;

	/* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
	void *platform_context;

	/** Count of contexts keeping the GPU powered */
	atomic_t keep_gpu_powered_count;

	/* List of kbase_contexts created */
	struct list_head kctx_list;
	struct mutex kctx_list_lock;
#ifdef CONFIG_MALI_MIDGARD_RT_PM
	struct delayed_work runtime_pm_workqueue;
#endif
#ifdef CONFIG_MALI_TRACE_TIMELINE
	kbase_trace_kbdev_timeline timeline;
#endif
#ifdef CONFIG_DEBUG_FS
	/* directory for debugfs entries */
	struct dentry *mali_debugfs_directory;
	/* debugfs entry for gpu_memory */
	struct dentry *gpu_memory_dentry;
	/* debugfs entry for trace */
	struct dentry *trace_dentry;
#endif /* CONFIG_DEBUG_FS */

	/* fbdump profiling controls set by gator */
	u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
#if MALI_CUSTOMER_RELEASE == 0
	/* Number of jobs that are run before a job is forced to fail and
	 * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced
	 * failures. */
	int force_replay_limit;
	/* Count of jobs between forced failures. Incremented on each job. A
	 * job is forced to fail once this is greater than or equal to
	 * force_replay_limit. */
	int force_replay_count;
	/* Core requirement for jobs to be failed and replayed. May be zero. */
	base_jd_core_req force_replay_core_req;
	/* MALI_TRUE if force_replay_limit should be randomized. The random
	 * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
	 */
	mali_bool force_replay_random;
#endif
};
struct kbase_context {
	struct list_head event_list;
	struct mutex event_mutex;
	mali_bool event_closed;
	struct workqueue_struct *event_workq;

	atomic_t setup_complete;
	atomic_t setup_in_progress;

	mali_bool keep_gpu_powered;

	u64 *mmu_teardown_pages;

	phys_addr_t aliasing_sink_page;

	struct mutex reg_lock;	/* To be converted to a rwlock? */
	struct rb_root reg_rbtree;	/* Red-Black tree of GPU regions (live regions) */

	unsigned long cookies;
	struct kbase_va_region *pending_regions[BITS_PER_LONG];

	wait_queue_head_t event_queue;

	kbase_jd_context jctx;

	atomic_t nonmapped_pages;

	kbase_mem_allocator osalloc;
	kbase_mem_allocator *pgd_allocator;

	struct list_head waiting_soft_jobs;
#ifdef CONFIG_KDS
	struct list_head waiting_kds_resource;
#endif
	/** This is effectively part of the Run Pool, because it only has a valid
	 * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in.
	 *
	 * The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing
	 * this.
	 *
	 * If the context relating to this as_nr is required, you must use
	 * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
	 * whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock
	 * to ensure the context doesn't disappear (but this has restrictions on what other locks
	 * you can take whilst doing this) */
	int as_nr;
	/* NOTE:
	 *
	 * Flags are in jctx.sched_info.ctx.flags
	 * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
	 *
	 * All other flags must be added there */
	spinlock_t mm_update_lock;
	struct mm_struct *process_mm;

#ifdef CONFIG_MALI_TRACE_TIMELINE
	kbase_trace_kctx_timeline timeline;
#endif
};
typedef enum kbase_reg_access_type {
	REG_READ,
	REG_WRITE
} kbase_reg_access_type;

typedef enum kbase_share_attr_bits {
	/* (1ULL << 8) bit is reserved */
	SHARE_BOTH_BITS = (2ULL << 8),	/* inner and outer shareable coherency */
	SHARE_INNER_BITS = (3ULL << 8)	/* inner shareable coherency */
} kbase_share_attr_bits;
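/*
 * The share attribute values above occupy exactly the ENTRY_SHARE_BITS
 * field (bits 9:8) of a page table entry, so they can be ORed straight in
 * once that field is cleared. Hypothetical helper, for illustration only:
 */
static INLINE u64 example_entry_set_share(u64 entry, u64 share_bits)
{
	/* share_bits is SHARE_INNER_BITS or SHARE_BOTH_BITS */
	return (entry & ~ENTRY_SHARE_BITS) | share_bits;
}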
/* Conversion helpers for setting up high resolution timers */
#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
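/*
 * Usage sketch (illustrative, hypothetical helper): arming a relative
 * one-shot hrtimer with a millisecond delay, e.g. a reset watchdog using
 * RESET_TIMEOUT. Note the u32 maths in HR_TIMER_DELAY_MSEC limits the
 * delay to roughly 4.2 seconds.
 */
static INLINE void example_start_timer_ms(struct hrtimer *timer, u32 delay_ms)
{
	hrtimer_start(timer, HR_TIMER_DELAY_MSEC(delay_ms), HRTIMER_MODE_REL);
}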
/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
/* Maximum number of loops polling the GPU for an AS flush to complete before we assume the GPU has hung */
#define KBASE_AS_FLUSH_MAX_LOOPS 100000
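/*
 * Illustrative sketch (hypothetical helper) of the bounded-polling pattern
 * these limits exist for; poll_done stands in for a GPU status-register
 * read. On hitting the bound the caller assumes completion (cache flush)
 * or a hang (AS flush), as the comments above describe.
 */
static INLINE mali_bool example_poll_bounded(mali_bool (*poll_done)(void *priv), void *priv, u32 max_loops)
{
	u32 i;

	for (i = 0; i < max_loops; i++) {
		if (poll_done(priv))
			return MALI_TRUE;	/* finished within the bound */
	}
	return MALI_FALSE;	/* bound reached */
}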
/* Return values from kbase_replay_process */

/* Replay job has completed */
#define MALI_REPLAY_STATUS_COMPLETE 0
/* Replay job is replaying and will continue once replayed jobs have completed. */
#define MALI_REPLAY_STATUS_REPLAYING 1
#define MALI_REPLAY_STATUS_MASK 0xff
/* Caller must call kbasep_js_try_schedule_head_ctx */
#define MALI_REPLAY_FLAG_JS_RESCHED 0x100
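/*
 * Illustrative decode (hypothetical helper) of a kbase_replay_process()
 * return value: the low byte carries the status, and the caller must also
 * test MALI_REPLAY_FLAG_JS_RESCHED and, when set, call
 * kbasep_js_try_schedule_head_ctx() as noted above.
 */
static INLINE int example_replay_status(int ret)
{
	return ret & MALI_REPLAY_STATUS_MASK;	/* COMPLETE or REPLAYING */
}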
/* Maximum number of times a job can be replayed */
#define BASEP_JD_REPLAY_LIMIT 15

#endif /* _KBASE_DEFS_H_ */