* (C) COPYRIGHT ARM Limited. All rights reserved.
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* A copy of the licence is included with the program, and can also be obtained
* from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
* @file mali_kbase_defs.h
* Definitions (types, defines, etc.) common to Kbase. They are placed here to
* allow the hierarchy of header files to work.
#ifndef _KBASE_DEFS_H_
#define _KBASE_DEFS_H_
#include <kbase/mali_kbase_config.h>
#include <kbase/mali_base_hwconfig.h>
#include <kbase/src/common/mali_kbase_mem_lowlevel.h>
#include <kbase/src/common/mali_kbase_mem_alloc.h>
#include <linux/atomic.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/kds.h>
#endif /* CONFIG_KDS */
//#include <linux/sync.h>
#endif /* CONFIG_SYNC */
/** Enable SW tracing when set */
#ifdef CONFIG_MALI_T6XX_ENABLE_TRACE
#define KBASE_TRACE_ENABLE 1
#ifndef KBASE_TRACE_ENABLE
#ifdef CONFIG_MALI_DEBUG
#define KBASE_TRACE_ENABLE 1
#define KBASE_TRACE_ENABLE 0
#endif /* CONFIG_MALI_DEBUG */
#endif /* KBASE_TRACE_ENABLE */
/** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
#define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
* Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
* Note that the total time between the context zap starting and the GPU actually being reset is ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT,
* to give other contexts time for their jobs to be soft-stopped and removed from the hardware
#define ZAP_TIMEOUT 1000
/** Number of milliseconds before we time out on a GPU soft/hard reset */
#define RESET_TIMEOUT 500
* Prevent soft-stops from occurring in scheduling situations
* This is not due to HW issues; it is used when more predictable scheduling is desired.
* Soft stops may therefore still be disabled separately due to HW issues.
* @note Soft stop will still be used for non-scheduling purposes, e.g. when terminating a context.
* @note if not in use, define this value to 0 instead of \#undef'ing it
#define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
* Prevent hard-stops from occurring in scheduling situations
* This is not due to HW issues; it is used when more predictable scheduling is desired.
* @note Hard stop will still be used for non-scheduling purposes, e.g. when terminating a context.
* @note if not in use, define this value to 0 instead of \#undef'ing it
#define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
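/* Because these macros are defined to 0 rather than left undefined, they can be
 * tested with a plain #if. A minimal sketch of the intended usage (the helper
 * name below is hypothetical, for illustration only):
 *
 *   #if !KBASE_DISABLE_SCHEDULING_SOFT_STOPS
 *           issue_scheduling_soft_stop(kbdev, js);
 *   #endif
 */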
/* Forward declarations + definitions */
typedef struct kbase_context kbase_context;
typedef struct kbase_jd_atom kbasep_jd_atom;
typedef struct kbase_device kbase_device;
* The maximum number of Job Slots to support in the Hardware.
* You can optimize this down if your target devices will only ever support a
* small number of job slots.
#define BASE_JM_MAX_NR_SLOTS 16
* The maximum number of Address Spaces to support in the Hardware.
* You can optimize this down if your target devices will only ever support a
* small number of Address Spaces.
#define BASE_MAX_NR_AS 16
#define ENTRY_IS_ATE 1ULL
#define ENTRY_IS_INVAL 2ULL
#define ENTRY_IS_PTE 3ULL
#define MIDGARD_MMU_VA_BITS 48
#define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
#define ENTRY_RD_BIT (1ULL << 6)
#define ENTRY_WR_BIT (1ULL << 7)
#define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
#define ENTRY_ACCESS_BIT (1ULL << 10)
#define ENTRY_NX_BIT (1ULL << 54)
#define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
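/* Illustrative sketch only (not necessarily how the MMU code is written): the
 * low two bits of a 64-bit translation table entry select its type, and
 * ENTRY_FLAGS_MASK isolates the permission/attribute bits defined above:
 *
 *   if ((entry & 3ULL) == ENTRY_IS_ATE)
 *           flags = entry & ENTRY_FLAGS_MASK;
 */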
#if MIDGARD_MMU_VA_BITS > 39
#define MIDGARD_MMU_TOPLEVEL 0
#define MIDGARD_MMU_TOPLEVEL 1
#define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW)
#define GROWABLE_FLAGS_MASK (GROWABLE_FLAGS_REQUIRED | KBASE_REG_FREE)
/** setting in kbase_context::as_nr that indicates it's invalid */
#define KBASEP_AS_NR_INVALID (-1)
#define KBASE_LOCK_REGION_MAX_SIZE (63)
#define KBASE_LOCK_REGION_MIN_SIZE (11)
#define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
#define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
#define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
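/* Because KBASE_TRACE_SIZE is a power of two, ring-buffer indices can wrap
 * with a mask instead of a modulo. A minimal sketch (the index name next_in
 * is an assumption for illustration):
 *
 *   next_in = (next_in + 1) & KBASE_TRACE_MASK;
 */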
#include "mali_kbase_js_defs.h"
* @brief States of the state machine processed by kbasep_js_job_check_ref_cores(), which
* handles retaining cores for power management and affinity management.
* The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
* where lots of atoms could be submitted before powerup, and each has an
* affinity chosen that causes other atoms to have an affinity
* violation. Whilst the affinity was not causing violations at the time it
* was chosen, it could cause violations thereafter. For example, 1000 jobs
* could have had their affinity chosen during the powerup time, so any of
* those 1000 jobs could cause an affinity violation later on.
* The attack would otherwise occur because other atoms/contexts have to wait for:
* -# the currently running atoms (which are causing the violation) to finish;
* -# and, the atoms that had their affinity chosen during powerup to
* finish. These are run preferentially because they don't cause a
* violation, but instead continue to cause the violation in others.
* -# or, the attacker is scheduled out (which might not happen for just 2 contexts)
* By re-choosing the affinity (which is designed to avoid violations at the
* time it's chosen), we break condition (2) of the wait, which minimizes the
* problem to just waiting for current jobs to finish (which can be bounded if
* the Job Scheduling Policy has a timer).
/** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
/** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
/** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
/** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
/** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
KBASE_ATOM_COREREF_STATE_READY
} kbase_atom_coreref_state;
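/* Typical (successful) progression through the states above, as described in
 * the per-state comments; a sketch only:
 *
 *   NO_CORES_REQUESTED -> WAITING_FOR_REQUESTED_CORES -> RECHECK_AFFINITY
 *                      -> CHECK_AFFINITY_VIOLATIONS -> READY
 */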
/** Atom is not used */
KBASE_JD_ATOM_STATE_UNUSED,
/** Atom is queued in JD */
KBASE_JD_ATOM_STATE_QUEUED,
/** Atom has been given to JS (is runnable/running) */
KBASE_JD_ATOM_STATE_IN_JS,
/** Atom has been completed, but not yet handed back to userspace */
KBASE_JD_ATOM_STATE_COMPLETED
} kbase_jd_atom_state;
/** Atom has previously been soft-stopped */
#define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
/** Atom has previously been retried (re-run) */
#define KBASE_KATOM_FLAGS_RERUN (1<<2)
#define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
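/* These flags are intended to be OR'd into, and tested against, a per-atom
 * flags word. Sketch only; the field name atom_flags and the helper are
 * assumptions for illustration:
 *
 *   katom->atom_flags |= KBASE_KATOM_FLAGS_RERUN;
 *   if (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED)
 *           handle_previously_soft_stopped_atom(katom);
 */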
typedef struct kbase_jd_atom kbase_jd_atom;
mali_addr64 gpu_address;
struct kbase_mem_phy_alloc *alloc;
struct kbase_jd_atom {
struct work_struct work;
ktime_t start_timestamp;
struct list_head dep_head[2];
struct list_head dep_item[2];
struct kbase_jd_atom *dep_atom[2];
struct kbase_ext_res *extres;
kbase_atom_coreref_state coreref_state;
struct list_head node;
struct kds_resource_set *kds_rset;
mali_bool kds_dep_satisfied;
#endif /* CONFIG_KDS */
struct sync_fence *fence;
struct sync_fence_waiter sync_waiter;
#endif /* CONFIG_SYNC */
/* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
base_jd_event_code event_code;
base_jd_core_req core_req; /**< core requirements */
/** Job Slot to retry submitting to if submission from IRQ handler failed
* NOTE: see if this can be unified into another member, e.g. the event */
int retry_submit_on_slot;
kbasep_js_policy_job_info sched_info;
/* Atom priority scaled to the nice range with a +20 offset (0..39) */
int poking; /* BASE_HW_ISSUE_8316 */
wait_queue_head_t completed;
kbase_jd_atom_state status;
#ifdef CONFIG_GPU_TRACEPOINTS
/* Assigned after the atom is completed. Used to check whether the PRLAM-10676 workaround should be applied */
* Theory of operations:
* Atom objects are statically allocated within the context structure.
* Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
#define KBASE_JD_DEP_QUEUE_SIZE 256
typedef struct kbase_jd_context {
kbasep_js_kctx_info sched_info;
kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
/** Tracks all job-dispatch jobs. This includes those not tracked by
* the scheduler: 'not ready to run' and 'dependency-only' jobs. */
/** Waitq that reflects whether there are no jobs (including SW-only
* dependency jobs). This is set when no jobs are present on the ctx,
* and clear when there are jobs.
* @note: Job Dispatcher knows about more jobs than the Job Scheduler:
* the Job Scheduler is unaware of jobs that are blocked on dependencies,
* and SW-only dependency jobs.
* This waitq can be waited upon to find out when the context jobs are all
* done/cancelled (including those that might've been blocked on
* dependencies) - and so, whether it can be terminated. However, it should
* only be terminated once it is neither present in the policy-queue (see
* kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see
* kbasep_js_kctx_info::ctx::is_scheduled).
* Since the waitq is only set under kbase_jd_context::lock,
* the waiter should also briefly obtain and drop kbase_jd_context::lock to
* guarantee that the setter has completed its work on the kbase_context
* This must be updated atomically with:
* - kbase_jd_context::job_nr */
wait_queue_head_t zero_jobs_wait;
/** Job Done workqueue. */
struct workqueue_struct *job_done_wq;
size_t tb_wrap_offset;
struct kds_callback kds_cb;
#endif /* CONFIG_KDS */
#ifdef CONFIG_GPU_TRACEPOINTS
typedef struct kbase_jm_slot {
/* The number of slots must be a power of two */
#define BASE_JM_SUBMIT_SLOTS 16
#define BASE_JM_SUBMIT_SLOTS_MASK (BASE_JM_SUBMIT_SLOTS - 1)
struct kbase_jd_atom *submitted[BASE_JM_SUBMIT_SLOTS];
kbase_context *last_context;
typedef enum kbase_midgard_type {
} kbase_midgard_type;
typedef struct kbase_device_info {
kbase_midgard_type dev_type;
/** Poking state for BASE_HW_ISSUE_8316 */
KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
/** Poking state for BASE_HW_ISSUE_8316 */
typedef u32 kbase_as_poke_state;
* Important: the code assumes that a kbase_as structure is always located at
* kbase_device->as[number]. This is used to recover the containing
* kbase_device from a kbase_as structure.
* Therefore, kbase_as structures must not be allocated anywhere else.
typedef struct kbase_as {
struct workqueue_struct *pf_wq;
struct work_struct work_pagefault;
struct work_struct work_busfault;
mali_addr64 fault_addr;
struct mutex transaction_mutex;
/* BASE_HW_ISSUE_8316 */
struct workqueue_struct *poke_wq;
struct work_struct poke_work;
/** Protected by kbasep_js_device_data::runpool_irq::lock */
/** Protected by kbasep_js_device_data::runpool_irq::lock */
kbase_as_poke_state poke_state;
struct hrtimer poke_timer;
/* Tracking of memory usage */
typedef struct kbasep_mem_usage {
* Instrumentation State Machine States
/** State where instrumentation is not active */
KBASE_INSTR_STATE_DISABLED = 0,
/** State machine is active and ready for a command. */
KBASE_INSTR_STATE_IDLE,
/** Hardware is currently dumping a frame. */
KBASE_INSTR_STATE_DUMPING,
/** We've requested a cache clean to occur on a workqueue */
KBASE_INSTR_STATE_REQUEST_CLEAN,
/** Hardware is currently cleaning and invalidating caches. */
KBASE_INSTR_STATE_CLEANING,
/** Cache clean completed, and either a) a dump is complete, or
* b) instrumentation can now be set up. */
KBASE_INSTR_STATE_CLEANED,
/** kbasep_reset_timeout_worker() has started (but not completed) a
* reset. This generally indicates the current action should be aborted, and
* kbasep_reset_timeout_worker() will handle the cleanup */
KBASE_INSTR_STATE_RESETTING,
/** An error has occurred during DUMPING (page fault). */
KBASE_INSTR_STATE_FAULT
typedef struct kbasep_mem_device {
u32 ump_device_id; /* Which UMP device this GPU should be mapped to.
Read-only, copied from platform configuration on startup. */
#endif /* CONFIG_UMP */
u32 per_process_memory_limit; /* How much memory (in bytes) a single process can access.
Read-only, copied from platform configuration on startup. */
kbasep_mem_usage usage; /* Tracks usage of OS shared memory. Initialized with platform
configuration data, updated when OS memory is allocated/freed. */
#define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
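/* KBASE_TRACE_CODE pastes its argument onto the KBASE_TRACE_CODE_ prefix, so
 * for a hypothetical event name FOO:
 *
 *   KBASE_TRACE_CODE(FOO)  ->  KBASE_TRACE_CODE_FOO
 *
 * which must match an enumerator generated below via KBASE_TRACE_CODE_MAKE_CODE.
 */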
/* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
* THIS MUST BE USED AT THE START OF THE ENUM */
#define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
#include "mali_kbase_trace_defs.h"
#undef KBASE_TRACE_CODE_MAKE_CODE
/* Comma on its own, to extend the list */
/* Must be the last in the enum */
KBASE_TRACE_CODE_COUNT
#define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
#define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
typedef struct kbase_trace {
struct timespec timestamp;
unsigned long info_val;
/** Event IDs for the power management framework.
* Any of these events might be missed, so they should not be relied upon to
* find the precise state of the GPU at a particular time in the
* trace. Overall, we should get a high percentage of these events for
* statistical purposes, and so a few missing should not be a problem */
typedef enum kbase_timeline_pm_event {
/* helper for tests */
KBASEP_TIMELINE_PM_EVENT_FIRST,
/** Event reserved for backwards compatibility with 'init' events */
KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
/** The power state of the device has changed.
* Specifically, the device has reached a desired or available state.
KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
/** The GPU is becoming active.
* This event is sent when the first context is about to use the GPU.
KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
/** The GPU is becoming idle.
* This event is sent when the last context has finished using the GPU.
KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
/** Event reserved for backwards compatibility with 'policy_change'
KBASE_TIMELINE_PM_EVENT_RESERVED_4,
/** Event reserved for backwards compatibility with 'system_suspend'
KBASE_TIMELINE_PM_EVENT_RESERVED_5,
/** Event reserved for backwards compatibility with 'system_resume'
KBASE_TIMELINE_PM_EVENT_RESERVED_6,
/** The job scheduler is requesting to power up/down cores.
* This event is sent when:
* - powered down cores are needed to complete a job
* - powered up cores are not needed anymore
KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
} kbase_timeline_pm_event;
#ifdef CONFIG_MALI_TRACE_TIMELINE
typedef struct kbase_trace_kctx_timeline {
atomic_t jd_atoms_in_flight;
} kbase_trace_kctx_timeline;
typedef struct kbase_trace_kbdev_timeline {
struct dentry *dentry;
/* Note: strictly speaking, not needed, because it's in sync with
* kbase_device::jm_slots[]::submitted_nr
* But it's kept as an example of how to add global timeline tracking
* The caller must hold kbasep_js_device_data::runpool_irq::lock when accessing this */
u8 slot_atoms_submitted[BASE_JM_SUBMIT_SLOTS];
/* Last UID for each PM event */
atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
/* Counter for generating PM event UIDs */
atomic_t pm_event_uid_counter;
* L2 transition state - MALI_TRUE indicates that the transition is ongoing
* Expected to be protected by pm.power_change_lock */
mali_bool l2_transitioning;
} kbase_trace_kbdev_timeline;
#endif /* CONFIG_MALI_TRACE_TIMELINE */
typedef struct kbasep_kctx_list_element {
struct list_head link;
} kbasep_kctx_list_element;
struct kbase_device {
/** jm_slots is protected by kbasep_js_device_data::runpool_irq::lock */
kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
kbase_os_device osdev;
kbase_pm_device_data pm;
kbasep_js_device_data js_data;
kbasep_mem_device memdev;
kbase_as as[BASE_MAX_NR_AS];
spinlock_t mmu_mask_change;
kbase_gpu_props gpu_props;
/** List of SW workarounds for HW issues */
unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
/** List of features available */
unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
/* Cached present bitmaps - these are the same as the corresponding hardware registers */
u64 shader_present_bitmap;
u64 tiler_present_bitmap;
u64 l2_present_bitmap;
u64 l3_present_bitmap;
/* Bitmaps of cores that are currently in use (running jobs).
* These should be kept up to date by the job scheduler.
* pm.power_change_lock should be held when accessing these members.
* kbase_pm_check_transitions_nolock() should be called when bits are
* cleared to update the power management system and allow transitions to occur. */
u64 shader_inuse_bitmap;
/* Refcount for cores in use */
u32 shader_inuse_cnt[64];
/* Bitmaps of cores the JS needs for jobs ready to run */
u64 shader_needed_bitmap;
/* Refcount for cores needed */
u32 shader_needed_cnt[64];
u32 tiler_needed_cnt;
/* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
/* Bitmaps of cores that are currently available (powered up, and the power policy is happy for jobs to be
* submitted to these cores). These are updated by the power management code. The job scheduler should avoid
* submitting new jobs to any cores that are not marked as available.
* pm.power_change_lock should be held when accessing these members.
u64 shader_available_bitmap;
u64 tiler_available_bitmap;
u64 l2_available_bitmap;
u64 shader_ready_bitmap;
u64 shader_transitioning_bitmap;
s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
/* Structure used for instrumentation and HW counters dumping */
/* The lock should be used when accessing any of the following members */
wait_queue_head_t wait;
kbase_instr_state state;
wait_queue_head_t cache_clean_wait;
struct workqueue_struct *cache_clean_wq;
struct work_struct cache_clean_work;
kbase_context *suspended_kctx;
kbase_uk_hwcnt_setup suspended_state;
/* Set when we're about to reset the GPU */
#define KBASE_RESET_GPU_NOT_PENDING 0 /* The GPU reset isn't pending */
#define KBASE_RESET_GPU_PREPARED 1 /* kbase_prepare_to_reset_gpu has been called */
#define KBASE_RESET_GPU_COMMITTED 2 /* kbase_reset_gpu has been called - the reset will now definitely happen
* within the timeout period */
#define KBASE_RESET_GPU_HAPPENING 3 /* The GPU reset process is currently occurring (timeout has expired or
* kbasep_try_reset_gpu_early was called) */
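/* Sketch of the intended progression between these values (derived from the
 * comments above, not a guarantee of every path):
 *
 *   NOT_PENDING -> PREPARED (kbase_prepare_to_reset_gpu)
 *               -> COMMITTED (kbase_reset_gpu)
 *               -> HAPPENING (timeout expiry or kbasep_try_reset_gpu_early)
 *               -> back to NOT_PENDING once the reset worker completes.
 */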
/* Work queue and work item for performing the reset in */
struct workqueue_struct *reset_workq;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
struct hrtimer reset_timer;
/* Value to be written to the irq_throttle register each time an IRQ is served */
atomic_t irq_throttle_cycles;
const kbase_attribute *config_attributes;
#if KBASE_TRACE_ENABLE != 0
spinlock_t trace_lock;
kbase_trace *trace_rbuf;
#if MALI_CUSTOMER_RELEASE == 0
/* This is used to override the current job scheduler values for
* KBASE_CONFIG_ATTR_JS_SOFT_STOP_TICKS
* KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
* KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
* KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
* KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
* These values are set via the js_timeouts sysfs file.
u32 js_soft_stop_ticks;
u32 js_hard_stop_ticks_ss;
u32 js_hard_stop_ticks_nss;
u32 js_reset_ticks_ss;
u32 js_reset_ticks_nss;
struct mutex cacheclean_lock;
/* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
void *platform_context;
/** Count of contexts keeping the GPU powered */
atomic_t keep_gpu_powered_count;
/* List of kbase_contexts created */
struct list_head kctx_list;
struct mutex kctx_list_lock;
#ifdef CONFIG_MALI_T6XX_RT_PM
struct delayed_work runtime_pm_workqueue;
#ifdef CONFIG_MALI_TRACE_TIMELINE
kbase_trace_kbdev_timeline timeline;
#ifdef CONFIG_DEBUG_FS
/* directory for debugfs entries */
struct dentry *mali_debugfs_directory;
/* debugfs entry for gpu_memory */
struct dentry *gpu_memory_dentry;
/* debugfs entry for trace */
struct dentry *trace_dentry;
#endif /* CONFIG_DEBUG_FS */
/* fbdump profiling controls set by gator */
u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
struct kbase_context {
struct list_head event_list;
struct mutex event_mutex;
mali_bool event_closed;
struct workqueue_struct *event_workq;
atomic_t setup_complete;
atomic_t setup_in_progress;
mali_bool keep_gpu_powered;
u64 *mmu_teardown_pages;
struct mutex reg_lock; /* To be converted to a rwlock? */
struct rb_root reg_rbtree; /* Red-Black tree of GPU regions (live regions) */
kbase_os_context osctx;
kbase_jd_context jctx;
kbasep_mem_usage usage;
atomic_t nonmapped_pages;
kbase_mem_allocator osalloc;
kbase_mem_allocator *pgd_allocator;
struct list_head waiting_soft_jobs;
struct list_head waiting_kds_resource;
/** This is effectively part of the Run Pool, because it only has a valid
* setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
* The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing this.
* If the context relating to this as_nr is required, you must use
* kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
* whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock
* to ensure the context doesn't disappear (but this has restrictions on what other locks
* you can take whilst doing this) */
* Flags are in jctx.sched_info.ctx.flags
* Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
* All other flags must be added there */
spinlock_t mm_update_lock;
struct mm_struct *process_mm;
#ifdef CONFIG_MALI_TRACE_TIMELINE
kbase_trace_kctx_timeline timeline;
typedef enum kbase_reg_access_type {
} kbase_reg_access_type;
typedef enum kbase_share_attr_bits {
/* (1ULL << 8) bit is reserved */
SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
} kbase_share_attr_bits;
/* Conversion helpers for setting up high resolution timers */
#define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
#define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
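/* Example of arming an hrtimer with one of the helpers above; a sketch only,
 * assuming 'timer' is an already-initialised struct hrtimer:
 *
 *   hrtimer_start(&timer, HR_TIMER_DELAY_MSEC(RESET_TIMEOUT), HRTIMER_MODE_REL);
 */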
/* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
#define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
/* Maximum number of loops polling the GPU for an AS flush to complete before we assume the GPU has hung */
#define KBASE_AS_FLUSH_MAX_LOOPS 100000
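/* The limits above bound polling loops of roughly this shape; a sketch only,
 * with a hypothetical completion predicate:
 *
 *   u32 loops = KBASE_AS_FLUSH_MAX_LOOPS;
 *   while (--loops && as_flush_still_active(kbdev, as_nr))
 *           ;
 *   if (!loops)
 *           assume the GPU has hung and start recovery;
 */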
#endif /* _KBASE_DEFS_H_ */