3 * (C) COPYRIGHT ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
21 * @file mali_kbase_defs.h
23 * Definitions (types, defines, etc.) common to Kbase. They are placed here to
24 * allow the hierarchy of header files to work.
27 #ifndef _KBASE_DEFS_H_
28 #define _KBASE_DEFS_H_
30 #include <mali_kbase_config.h>
31 #include <mali_base_hwconfig.h>
32 #include <mali_kbase_mem_lowlevel.h>
33 #include <mali_kbase_mem_alloc.h>
34 #include <mali_kbase_mmu_hw.h>
37 #include <linux/atomic.h>
38 #include <linux/mempool.h>
39 #include <linux/slab.h>
42 #include <linux/kds.h>
43 #endif /* CONFIG_KDS */
47 #endif /* CONFIG_SYNC */
49 #ifdef CONFIG_DEBUG_FS
50 #include <linux/debugfs.h>
51 #endif /* CONFIG_DEBUG_FS */
53 #ifdef CONFIG_PM_DEVFREQ
54 #include <linux/devfreq.h>
55 #endif /* CONFIG_PM_DEVFREQ */
57 /** Enable SW tracing when set */
58 #ifdef CONFIG_MALI_MIDGARD_ENABLE_TRACE
59 #define KBASE_TRACE_ENABLE 1
62 #ifndef KBASE_TRACE_ENABLE
63 #ifdef CONFIG_MALI_DEBUG
64 #define KBASE_TRACE_ENABLE 1
66 #define KBASE_TRACE_ENABLE 0
67 #endif /* CONFIG_MALI_DEBUG */
68 #endif /* KBASE_TRACE_ENABLE */
70 /** Dump Job slot trace on error (only active if KBASE_TRACE_ENABLE != 0) */
71 #define KBASE_TRACE_DUMP_ON_JOB_SLOT_ERROR 1
74 * Number of milliseconds before resetting the GPU when a job cannot be "zapped" from the hardware.
75 * Note that the time is actually ZAP_TIMEOUT+SOFT_STOP_RESET_TIMEOUT between the context zap starting and the GPU
76 * actually being reset to give other contexts time for their jobs to be soft-stopped and removed from the hardware
79 #define ZAP_TIMEOUT 1000
81 /** Number of milliseconds before we time out on a GPU soft/hard reset */
82 #define RESET_TIMEOUT 500
85 * Prevent soft-stops from occurring in scheduling situations
87 * This is not due to HW issues, but when scheduling is desired to be more predictable.
89 * Therefore, soft stop may still be disabled due to HW issues.
91 * @note Soft stop will still be used for non-scheduling purposes e.g. when terminating a context.
93 * @note if not in use, define this value to 0 instead of \#undef'ing it
95 #define KBASE_DISABLE_SCHEDULING_SOFT_STOPS 0
97 * Prevent hard-stops from occurring in scheduling situations
99 * This is not due to HW issues, but when scheduling is desired to be more predictable.
101 * @note Hard stop will still be used for non-scheduling purposes e.g. when terminating a context.
103 * @note if not in use, define this value to 0 instead of \#undef'ing it
105 #define KBASE_DISABLE_SCHEDULING_HARD_STOPS 0
108 * The maximum number of Job Slots to support in the Hardware.
110 * You can optimize this down if your target devices will only ever support a
111 * small number of job slots.
113 #define BASE_JM_MAX_NR_SLOTS 16
116 * The maximum number of Address Spaces to support in the Hardware.
118 * You can optimize this down if your target devices will only ever support a
119 * small number of Address Spaces
121 #define BASE_MAX_NR_AS 16
124 #define ENTRY_IS_ATE 1ULL
125 #define ENTRY_IS_INVAL 2ULL
126 #define ENTRY_IS_PTE 3ULL
128 #define MIDGARD_MMU_VA_BITS 48
130 #define ENTRY_ATTR_BITS (7ULL << 2) /* bits 4:2 */
131 #define ENTRY_RD_BIT (1ULL << 6)
132 #define ENTRY_WR_BIT (1ULL << 7)
133 #define ENTRY_SHARE_BITS (3ULL << 8) /* bits 9:8 */
134 #define ENTRY_ACCESS_BIT (1ULL << 10)
135 #define ENTRY_NX_BIT (1ULL << 54)
137 #define ENTRY_FLAGS_MASK (ENTRY_ATTR_BITS | ENTRY_RD_BIT | ENTRY_WR_BIT | ENTRY_SHARE_BITS | ENTRY_ACCESS_BIT | ENTRY_NX_BIT)
139 #if MIDGARD_MMU_VA_BITS > 39
140 #define MIDGARD_MMU_TOPLEVEL 0
142 #define MIDGARD_MMU_TOPLEVEL 1
145 #define GROWABLE_FLAGS_REQUIRED (KBASE_REG_PF_GROW)
147 /** setting in kbase_context::as_nr that indicates it's invalid */
148 #define KBASEP_AS_NR_INVALID (-1)
150 #define KBASE_LOCK_REGION_MAX_SIZE (63)
151 #define KBASE_LOCK_REGION_MIN_SIZE (11)
153 #define KBASE_TRACE_SIZE_LOG2 8 /* 256 entries */
154 #define KBASE_TRACE_SIZE (1 << KBASE_TRACE_SIZE_LOG2)
155 #define KBASE_TRACE_MASK ((1 << KBASE_TRACE_SIZE_LOG2)-1)
157 #include "mali_kbase_js_defs.h"
159 #define KBASEP_FORCE_REPLAY_DISABLED 0
161 /* Maximum force replay limit when randomization is enabled */
162 #define KBASEP_FORCE_REPLAY_RANDOM_LIMIT 16
164 /** Atom has been previously soft-stopped */
165 #define KBASE_KATOM_FLAG_BEEN_SOFT_STOPPPED (1<<1)
166 /** Atom has been previously retried to execute */
167 #define KBASE_KATOM_FLAGS_RERUN (1<<2)
168 #define KBASE_KATOM_FLAGS_JOBCHAIN (1<<3)
169 /** Atom has been previously hard-stopped. */
170 #define KBASE_KATOM_FLAG_BEEN_HARD_STOPPED (1<<4)
171 /** Atom has caused us to enter disjoint state */
172 #define KBASE_KATOM_FLAG_IN_DISJOINT (1<<5)
174 /* SW related flags about types of JS_COMMAND action
175 * NOTE: These must be masked off by JS_COMMAND_MASK */
177 /** This command causes a disjoint event */
178 #define JS_COMMAND_SW_CAUSES_DISJOINT 0x100
180 /** Bitmask of all SW related flags */
181 #define JS_COMMAND_SW_BITS (JS_COMMAND_SW_CAUSES_DISJOINT)
183 #if (JS_COMMAND_SW_BITS & JS_COMMAND_MASK)
184 #error JS_COMMAND_SW_BITS not masked off by JS_COMMAND_MASK. Must update JS_COMMAND_SW_<..> bitmasks
187 /** Soft-stop command that causes a Disjoint event. This of course isn't
188 * entirely masked off by JS_COMMAND_MASK */
189 #define JS_COMMAND_SOFT_STOP_WITH_SW_DISJOINT \
190 (JS_COMMAND_SW_CAUSES_DISJOINT | JS_COMMAND_SOFT_STOP)
/* Describes one dependency slot of a kbase_jd_atom: the atom depended
 * upon plus a dependency type (accessed via kbase_jd_katom_dep_type()).
 * Instances live in kbase_jd_atom::dep[2] and are declared const there;
 * mutation goes through the kbase_jd_katom_dep_set/clear() helpers. */
193 struct kbase_jd_atom_dependency
195 struct kbase_jd_atom *atom;
200 * @brief The function retrieves a read-only reference to the atom field from
201 * the kbase_jd_atom_dependency structure
203 * @param[in] dep kbase jd atom dependency.
205 * @return readonly reference to dependent ATOM.
207 static INLINE const struct kbase_jd_atom* const kbase_jd_katom_dep_atom(const struct kbase_jd_atom_dependency* dep)
209 LOCAL_ASSERT(dep != NULL);
/* Cast only adds const qualification to the stored pointer; no
 * representation change takes place. */
211 return (const struct kbase_jd_atom* const )(dep->atom);
215 * @brief The function retrieves a read-only reference to the dependency type field from
216 * the kbase_jd_atom_dependency structure
218 * @param[in] dep kbase jd atom dependency.
220 * @return A dependency type value.
/* NOTE(review): the `const` qualifier on a by-value u8 return has no
 * effect on callers; kept as-is to avoid touching the interface. */
222 static INLINE const u8 kbase_jd_katom_dep_type(const struct kbase_jd_atom_dependency* dep)
224 LOCAL_ASSERT(dep != NULL);
226 return dep->dep_type;
230 * @brief Setter macro for dep_atom array entry in kbase_jd_atom
232 * @param[in] dep The kbase jd atom dependency.
233 * @param[in] a The ATOM to be set as a dependency.
234 * @param type The ATOM dependency type to be set.
237 static INLINE void kbase_jd_katom_dep_set(const struct kbase_jd_atom_dependency* const_dep,
238 struct kbase_jd_atom * a,
241 struct kbase_jd_atom_dependency* dep;
243 LOCAL_ASSERT(const_dep != NULL);
/* Deliberately cast away const: kbase_jd_atom::dep[] is declared const
 * so that only these setter helpers may modify a dependency entry. */
245 dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
248 dep->dep_type = type;
252 * @brief Setter macro for dep_atom array entry in kbase_jd_atom
254 * @param[in] dep The kbase jd atom dependency to be cleared.
257 static INLINE void kbase_jd_katom_dep_clear(const struct kbase_jd_atom_dependency* const_dep)
259 struct kbase_jd_atom_dependency* dep;
261 LOCAL_ASSERT(const_dep != NULL);
/* Same const-cast pattern as kbase_jd_katom_dep_set(): dep[] entries
 * are const everywhere else, so write access is funnelled through here. */
263 dep = (REINTERPRET_CAST(struct kbase_jd_atom_dependency* )const_dep);
/* Mark the slot empty; the dependency type sentinel doubles as "no dep". */
266 dep->dep_type = BASE_JD_DEP_TYPE_INVALID;
271 mali_addr64 gpu_address;
272 struct kbase_mem_phy_alloc * alloc;
275 struct kbase_jd_atom {
276 struct work_struct work;
277 ktime_t start_timestamp;
278 u64 time_spent_us; /**< Total time spent on the GPU in microseconds */
280 struct base_jd_udata udata;
281 struct kbase_context *kctx;
283 struct list_head dep_head[2];
284 struct list_head dep_item[2];
285 const struct kbase_jd_atom_dependency dep[2];
288 struct kbase_ext_res * extres;
293 enum kbase_atom_coreref_state coreref_state;
295 struct list_head node;
296 struct kds_resource_set *kds_rset;
297 mali_bool kds_dep_satisfied;
298 #endif /* CONFIG_KDS */
300 struct sync_fence *fence;
301 struct sync_fence_waiter sync_waiter;
302 #endif /* CONFIG_SYNC */
304 /* Note: refer to kbasep_js_atom_retained_state, which will take a copy of some of the following members */
305 enum base_jd_event_code event_code;
306 base_jd_core_req core_req; /**< core requirements */
307 /** Job Slot to retry submitting to if submission from IRQ handler failed
309 * NOTE: see if this can be unified into the another member e.g. the event */
310 int retry_submit_on_slot;
312 union kbasep_js_policy_job_info sched_info;
313 /* atom priority scaled to nice range with +20 offset 0..39 */
316 int poking; /* BASE_HW_ISSUE_8316 */
318 wait_queue_head_t completed;
319 enum kbase_jd_atom_state status;
320 #ifdef CONFIG_GPU_TRACEPOINTS
323 /* Assigned after atom is completed. Used to check whether PRLAM-10676 workaround should be applied */
328 /* Number of times this atom has been retried. Used by replay soft job.
334 * Theory of operations:
336 * Atom objects are statically allocated within the context structure.
338 * Each atom is the head of two lists, one for the "left" set of dependencies, one for the "right" set.
341 #define KBASE_JD_DEP_QUEUE_SIZE 256
343 struct kbase_jd_context {
345 struct kbasep_js_kctx_info sched_info;
346 struct kbase_jd_atom atoms[BASE_JD_ATOM_COUNT];
348 /** Tracks all job-dispatch jobs. This includes those not tracked by
349 * the scheduler: 'not ready to run' and 'dependency-only' jobs. */
352 /** Waitq that reflects whether there are no jobs (including SW-only
353 * dependency jobs). This is set when no jobs are present on the ctx,
354 * and clear when there are jobs.
356 * @note: Job Dispatcher knows about more jobs than the Job Scheduler:
357 * the Job Scheduler is unaware of jobs that are blocked on dependencies,
358 * and SW-only dependency jobs.
360 * This waitq can be waited upon to find out when the context jobs are all
361 * done/cancelled (including those that might've been blocked on
362 * dependencies) - and so, whether it can be terminated. However, it should
363 * only be terminated once it is neither present in the policy-queue (see
364 * kbasep_js_policy_try_evict_ctx() ) nor the run-pool (see
365 * kbasep_js_kctx_info::ctx::is_scheduled).
367 * Since the waitq is only set under kbase_jd_context::lock,
368 * the waiter should also briefly obtain and drop kbase_jd_context::lock to
369 * guarantee that the setter has completed its work on the kbase_context
371 * This must be updated atomically with:
372 * - kbase_jd_context::job_nr */
373 wait_queue_head_t zero_jobs_wait;
375 /** Job Done workqueue. */
376 struct workqueue_struct *job_done_wq;
380 size_t tb_wrap_offset;
383 struct kds_callback kds_cb;
384 #endif /* CONFIG_KDS */
385 #ifdef CONFIG_GPU_TRACEPOINTS
390 struct kbase_jm_slot {
391 /* The number of slots must be a power of two */
392 #define BASE_JM_SUBMIT_SLOTS 16
393 #define BASE_JM_SUBMIT_SLOTS_MASK (BASE_JM_SUBMIT_SLOTS - 1)
395 struct kbase_jd_atom *submitted[BASE_JM_SUBMIT_SLOTS];
397 struct kbase_context *last_context;
405 struct kbase_device_info {
409 /** Poking state for BASE_HW_ISSUE_8316 */
411 KBASE_AS_POKE_STATE_IN_FLIGHT = 1<<0,
412 KBASE_AS_POKE_STATE_KILLING_POKE = 1<<1
415 /** Poking state for BASE_HW_ISSUE_8316 */
416 typedef u32 kbase_as_poke_state;
418 struct kbase_mmu_setup {
424 * Important: Our code makes assumptions that a struct kbase_as structure is always at
425 * kbase_device->as[number]. This is used to recover the containing
426 * struct kbase_device from a struct kbase_as structure.
428 * Therefore, struct kbase_as structures must not be allocated anywhere else.
433 struct workqueue_struct *pf_wq;
434 struct work_struct work_pagefault;
435 struct work_struct work_busfault;
436 enum kbase_mmu_fault_type fault_type;
438 mali_addr64 fault_addr;
439 struct mutex transaction_mutex;
441 struct kbase_mmu_setup current_setup;
443 /* BASE_HW_ISSUE_8316 */
444 struct workqueue_struct *poke_wq;
445 struct work_struct poke_work;
446 /** Protected by kbasep_js_device_data::runpool_irq::lock */
448 /** Protected by kbasep_js_device_data::runpool_irq::lock */
449 kbase_as_poke_state poke_state;
450 struct hrtimer poke_timer;
/** Return non-zero when the fault recorded on address space @as is a
 * bus fault (as opposed to a page fault — see kbase_as_has_page_fault()). */
453 static inline int kbase_as_has_bus_fault(struct kbase_as *as)
455 return as->fault_type == KBASE_MMU_FAULT_TYPE_BUS;
/** Return non-zero when the fault recorded on address space @as is a
 * page fault (as opposed to a bus fault — see kbase_as_has_bus_fault()). */
458 static inline int kbase_as_has_page_fault(struct kbase_as *as)
460 return as->fault_type == KBASE_MMU_FAULT_TYPE_PAGE;
464 * Instrumentation State Machine States
466 enum kbase_instr_state {
467 /** State where instrumentation is not active */
468 KBASE_INSTR_STATE_DISABLED = 0,
469 /** State machine is active and ready for a command. */
470 KBASE_INSTR_STATE_IDLE,
471 /** Hardware is currently dumping a frame. */
472 KBASE_INSTR_STATE_DUMPING,
473 /** We've requested a clean to occur on a workqueue */
474 KBASE_INSTR_STATE_REQUEST_CLEAN,
475 /** Hardware is currently cleaning and invalidating caches. */
476 KBASE_INSTR_STATE_CLEANING,
477 /** Cache clean completed, and either a) a dump is complete, or
478 * b) instrumentation can now be setup. */
479 KBASE_INSTR_STATE_CLEANED,
480 /** kbasep_reset_timeout_worker() has started (but not completed) a
481 * reset. This generally indicates the current action should be aborted, and
482 * kbasep_reset_timeout_worker() will handle the cleanup */
483 KBASE_INSTR_STATE_RESETTING,
484 /** An error has occurred during DUMPING (page fault). */
485 KBASE_INSTR_STATE_FAULT
488 void kbasep_reset_timeout_worker(struct work_struct *data);
489 enum hrtimer_restart kbasep_reset_timer_callback(struct hrtimer *data);
491 struct kbasep_mem_device {
492 atomic_t used_pages; /* Tracks usage of OS shared memory. Updated
493 when OS memory is allocated/freed. */
499 #define KBASE_TRACE_CODE(X) KBASE_TRACE_CODE_ ## X
501 enum kbase_trace_code {
502 /* IMPORTANT: USE OF SPECIAL #INCLUDE OF NON-STANDARD HEADER FILE
503 * THIS MUST BE USED AT THE START OF THE ENUM */
504 #define KBASE_TRACE_CODE_MAKE_CODE(X) KBASE_TRACE_CODE(X)
505 #include "mali_kbase_trace_defs.h"
506 #undef KBASE_TRACE_CODE_MAKE_CODE
507 /* Comma on its own, to extend the list */
509 /* Must be the last in the enum */
510 KBASE_TRACE_CODE_COUNT
513 #define KBASE_TRACE_FLAG_REFCOUNT (((u8)1) << 0)
514 #define KBASE_TRACE_FLAG_JOBSLOT (((u8)1) << 1)
517 struct timespec timestamp;
525 unsigned long info_val;
532 /** Event IDs for the power management framework.
534 * Any of these events might be missed, so they should not be relied upon to
535 * find the precise state of the GPU at a particular time in the
536 * trace. Overall, we should get a high percentage of these events for
537 * statistical purposes, and so a few missing should not be a problem */
538 enum kbase_timeline_pm_event {
539 /* helper for tests */
540 KBASEP_TIMELINE_PM_EVENT_FIRST,
542 /** Event reserved for backwards compatibility with 'init' events */
543 KBASE_TIMELINE_PM_EVENT_RESERVED_0 = KBASEP_TIMELINE_PM_EVENT_FIRST,
545 /** The power state of the device has changed.
547 * Specifically, the device has reached a desired or available state.
549 KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED,
551 /** The GPU is becoming active.
553 * This event is sent when the first context is about to use the GPU.
555 KBASE_TIMELINE_PM_EVENT_GPU_ACTIVE,
557 /** The GPU is becoming idle.
559 * This event is sent when the last context has finished using the GPU.
561 KBASE_TIMELINE_PM_EVENT_GPU_IDLE,
563 /** Event reserved for backwards compatibility with 'policy_change'
565 KBASE_TIMELINE_PM_EVENT_RESERVED_4,
567 /** Event reserved for backwards compatibility with 'system_suspend'
569 KBASE_TIMELINE_PM_EVENT_RESERVED_5,
571 /** Event reserved for backwards compatibility with 'system_resume'
573 KBASE_TIMELINE_PM_EVENT_RESERVED_6,
575 /** The job scheduler is requesting to power up/down cores.
577 * This event is sent when:
578 * - powered down cores are needed to complete a job
579 * - powered up cores are not needed anymore
581 KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
583 KBASEP_TIMELINE_PM_EVENT_LAST = KBASE_TIMELINE_PM_EVENT_CHANGE_GPU_STATE,
586 #ifdef CONFIG_MALI_TRACE_TIMELINE
587 struct kbase_trace_kctx_timeline {
588 atomic_t jd_atoms_in_flight;
592 struct kbase_trace_kbdev_timeline {
594 struct dentry *dentry;
596 /* Note: strictly speaking, not needed, because it's in sync with
597 * kbase_device::jm_slots[]::submitted_nr
599 * But it's kept as an example of how to add global timeline tracking
602 * The caller must hold kbasep_js_device_data::runpool_irq::lock when
604 u8 slot_atoms_submitted[BASE_JM_SUBMIT_SLOTS];
606 /* Last UID for each PM event */
607 atomic_t pm_event_uid[KBASEP_TIMELINE_PM_EVENT_LAST+1];
608 /* Counter for generating PM event UIDs */
609 atomic_t pm_event_uid_counter;
611 * L2 transition state - MALI_TRUE indicates that the transition is ongoing
612 * Expected to be protected by pm.power_change_lock */
613 mali_bool l2_transitioning;
615 #endif /* CONFIG_MALI_TRACE_TIMELINE */
618 struct kbasep_kctx_list_element {
619 struct list_head link;
620 struct kbase_context *kctx;
623 #define DEVNAME_SIZE 16
625 struct kbase_device {
626 /** jm_slots is protected by kbasep_js_device_data::runpool_irq::lock */
627 struct kbase_jm_slot jm_slots[BASE_JM_MAX_NR_SLOTS];
628 s8 slot_submit_count_irq[BASE_JM_MAX_NR_SLOTS];
630 struct list_head entry;
632 unsigned int kbase_group_error;
633 struct miscdevice mdev;
637 struct resource *reg_res;
642 #ifdef CONFIG_HAVE_CLK
645 char devname[DEVNAME_SIZE];
647 #ifdef CONFIG_MALI_NO_MALI
649 struct kmem_cache *irq_slab;
650 struct workqueue_struct *irq_workq;
651 atomic_t serving_job_irq;
652 atomic_t serving_gpu_irq;
653 atomic_t serving_mmu_irq;
654 spinlock_t reg_op_lock;
655 #endif /* CONFIG_MALI_NO_MALI */
657 struct kbase_pm_device_data pm;
658 struct kbasep_js_device_data js_data;
659 struct kbasep_mem_device memdev;
660 struct kbase_as as[BASE_MAX_NR_AS];
662 spinlock_t mmu_mask_change;
664 kbase_gpu_props gpu_props;
666 /** List of SW workarounds for HW issues */
667 unsigned long hw_issues_mask[(BASE_HW_ISSUE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
668 /** List of features available */
669 unsigned long hw_features_mask[(BASE_HW_FEATURE_END + BITS_PER_LONG - 1) / BITS_PER_LONG];
671 /* Cached present bitmaps - these are the same as the corresponding hardware registers */
672 u64 shader_present_bitmap;
673 u64 tiler_present_bitmap;
674 u64 l2_present_bitmap;
675 u64 l3_present_bitmap;
677 /* Bitmaps of cores that are currently in use (running jobs).
678 * These should be kept up to date by the job scheduler.
680 * pm.power_change_lock should be held when accessing these members.
682 * kbase_pm_check_transitions_nolock() should be called when bits are
683 * cleared to update the power management system and allow transitions to
685 u64 shader_inuse_bitmap;
687 /* Refcount for cores in use */
688 u32 shader_inuse_cnt[64];
690 /* Bitmaps of cores the JS needs for jobs ready to run */
691 u64 shader_needed_bitmap;
693 /* Refcount for cores needed */
694 u32 shader_needed_cnt[64];
698 u32 tiler_needed_cnt;
700 /* struct for keeping track of the disjoint information
702 * The state is > 0 if the GPU is in a disjoint state. Otherwise 0
703 * The count is the number of disjoint events that have occurred on the GPU
710 /* Refcount for tracking users of the l2 cache, e.g. when using hardware counter instrumentation. */
713 /* Bitmaps of cores that are currently available (powered up and the power policy is happy for jobs to be
714 * submitted to these cores. These are updated by the power management code. The job scheduler should avoid
715 * submitting new jobs to any cores that are not marked as available.
717 * pm.power_change_lock should be held when accessing these members.
719 u64 shader_available_bitmap;
720 u64 tiler_available_bitmap;
721 u64 l2_available_bitmap;
723 u64 shader_ready_bitmap;
724 u64 shader_transitioning_bitmap;
726 s8 nr_hw_address_spaces; /**< Number of address spaces in the GPU (constant after driver initialisation) */
727 s8 nr_user_address_spaces; /**< Number of address spaces available to user contexts */
729 /* Structure used for instrumentation and HW counters dumping */
731 /* The lock should be used when accessing any of the following members */
734 struct kbase_context *kctx;
736 wait_queue_head_t wait;
738 enum kbase_instr_state state;
739 wait_queue_head_t cache_clean_wait;
740 struct workqueue_struct *cache_clean_wq;
741 struct work_struct cache_clean_work;
743 struct kbase_context *suspended_kctx;
744 struct kbase_uk_hwcnt_setup suspended_state;
747 /* Set when we're about to reset the GPU */
749 #define KBASE_RESET_GPU_NOT_PENDING 0 /* The GPU reset isn't pending */
750 #define KBASE_RESET_GPU_PREPARED 1 /* kbase_prepare_to_reset_gpu has been called */
751 #define KBASE_RESET_GPU_COMMITTED 2 /* kbase_reset_gpu has been called - the reset will now definitely happen
752 * within the timeout period */
753 #define KBASE_RESET_GPU_HAPPENING 3 /* The GPU reset process is currently occurring (timeout has expired or
754 * kbasep_try_reset_gpu_early was called) */
756 /* Work queue and work item for performing the reset in */
757 struct workqueue_struct *reset_workq;
758 struct work_struct reset_work;
759 wait_queue_head_t reset_wait;
760 struct hrtimer reset_timer;
762 /*value to be written to the irq_throttle register each time an irq is served */
763 atomic_t irq_throttle_cycles;
765 const struct kbase_attribute *config_attributes;
767 #if KBASE_TRACE_ENABLE
768 spinlock_t trace_lock;
771 struct kbase_trace *trace_rbuf;
774 #if !MALI_CUSTOMER_RELEASE
775 /* This is used to override the current job scheduler values for
776 * KBASE_CONFIG_ATTR_JS_STOP_STOP_TICKS_SS
777 * KBASE_CONFIG_ATTR_JS_STOP_STOP_TICKS_CL
778 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_SS
779 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_CL
780 * KBASE_CONFIG_ATTR_JS_HARD_STOP_TICKS_NSS
781 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_SS
782 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_CL
783 * KBASE_CONFIG_ATTR_JS_RESET_TICKS_NSS.
785 * These values are set via the js_timeouts sysfs file.
787 u32 js_soft_stop_ticks;
788 u32 js_soft_stop_ticks_cl;
789 u32 js_hard_stop_ticks_ss;
790 u32 js_hard_stop_ticks_cl;
791 u32 js_hard_stop_ticks_nss;
792 u32 js_reset_ticks_ss;
793 u32 js_reset_ticks_cl;
794 u32 js_reset_ticks_nss;
797 struct mutex cacheclean_lock;
799 /* Platform specific private data to be accessed by mali_kbase_config_xxx.c only */
800 void *platform_context;
802 /** Count of contexts keeping the GPU powered */
803 atomic_t keep_gpu_powered_count;
805 /* List of kbase_contexts created */
806 struct list_head kctx_list;
807 struct mutex kctx_list_lock;
809 #ifdef CONFIG_MALI_MIDGARD_RT_PM
810 struct delayed_work runtime_pm_workqueue;
813 #ifdef CONFIG_PM_DEVFREQ
814 struct devfreq_dev_profile devfreq_profile;
815 struct devfreq *devfreq;
817 #ifdef CONFIG_DEVFREQ_THERMAL
818 struct devfreq_cooling_device *devfreq_cooling;
819 #ifdef CONFIG_MALI_POWER_ACTOR
820 struct power_actor *power_actor;
825 #ifdef CONFIG_MALI_TRACE_TIMELINE
826 struct kbase_trace_kbdev_timeline timeline;
829 #ifdef CONFIG_DEBUG_FS
830 /* directory for debugfs entries */
831 struct dentry *mali_debugfs_directory;
832 /* debugfs entry for gpu_memory */
833 struct dentry *gpu_memory_dentry;
834 /* debugfs entry for trace */
835 struct dentry *trace_dentry;
836 /* directory for per-ctx memory profiling data */
837 struct dentry *memory_profile_directory;
838 /* Root directory for job dispatcher data */
839 struct dentry *jd_directory;
840 #endif /* CONFIG_DEBUG_FS */
842 /* fbdump profiling controls set by gator */
843 u32 kbase_profiling_controls[FBDUMP_CONTROL_MAX];
846 #if MALI_CUSTOMER_RELEASE == 0
847 /* Number of jobs that are run before a job is forced to fail and
848 * replay. May be KBASEP_FORCE_REPLAY_DISABLED, to disable forced
850 int force_replay_limit;
851 /* Count of jobs between forced failures. Incremented on each job. A
852 * job is forced to fail once this is greater than or equal to
853 * force_replay_limit. */
854 int force_replay_count;
855 /* Core requirement for jobs to be failed and replayed. May be zero. */
856 base_jd_core_req force_replay_core_req;
857 /* MALI_TRUE if force_replay_limit should be randomized. The random
858 * value will be in the range of 1 - KBASEP_FORCE_REPLAY_RANDOM_LIMIT.
860 mali_bool force_replay_random;
863 /* Total number of created contexts */
867 struct kbase_context {
868 struct kbase_device *kbdev;
869 int id; /* System wide unique id */
871 struct list_head event_list;
872 struct mutex event_mutex;
873 mali_bool event_closed;
874 struct workqueue_struct *event_workq;
879 atomic_t setup_complete;
880 atomic_t setup_in_progress;
882 mali_bool keep_gpu_powered;
884 u64 *mmu_teardown_pages;
886 phys_addr_t aliasing_sink_page;
888 struct mutex reg_lock; /* To be converted to a rwlock? */
889 struct rb_root reg_rbtree; /* Red-Black tree of GPU regions (live regions) */
891 unsigned long cookies;
892 struct kbase_va_region *pending_regions[BITS_PER_LONG];
894 wait_queue_head_t event_queue;
898 struct kbase_jd_context jctx;
900 atomic_t nonmapped_pages;
902 struct kbase_mem_allocator osalloc;
903 struct kbase_mem_allocator * pgd_allocator;
905 struct list_head waiting_soft_jobs;
907 struct list_head waiting_kds_resource;
909 /** This is effectively part of the Run Pool, because it only has a valid
910 * setting (!=KBASEP_AS_NR_INVALID) whilst the context is scheduled in
912 * The kbasep_js_device_data::runpool_irq::lock must be held whilst accessing
915 * If the context relating to this as_nr is required, you must use
916 * kbasep_js_runpool_retain_ctx() to ensure that the context doesn't disappear
917 * whilst you're using it. Alternatively, just hold the kbasep_js_device_data::runpool_irq::lock
918 * to ensure the context doesn't disappear (but this has restrictions on what other locks
919 * you can take whilst doing this) */
924 * Flags are in jctx.sched_info.ctx.flags
925 * Mutable flags *must* be accessed under jctx.sched_info.ctx.jsctx_mutex
927 * All other flags must be added there */
928 spinlock_t mm_update_lock;
929 struct mm_struct * process_mm;
931 #ifdef CONFIG_MALI_TRACE_TIMELINE
932 struct kbase_trace_kctx_timeline timeline;
934 #ifdef CONFIG_DEBUG_FS
935 /* debugfs entry for memory profile */
936 struct dentry *mem_dentry;
937 /* Content of mem_profile file */
938 char *mem_profile_data;
939 /* Size of @c mem_profile_data */
940 size_t mem_profile_size;
941 /* Spinlock guarding data */
942 spinlock_t mem_profile_lock;
943 /* Per-context directory for JD data */
944 struct dentry *jd_ctx_dir;
945 #endif /* CONFIG_DEBUG_FS */
948 enum kbase_reg_access_type {
953 enum kbase_share_attr_bits {
954 /* (1ULL << 8) bit is reserved */
955 SHARE_BOTH_BITS = (2ULL << 8), /* inner and outer shareable coherency */
956 SHARE_INNER_BITS = (3ULL << 8) /* inner shareable coherency */
959 /* Conversion helpers for setting up high resolution timers */
960 #define HR_TIMER_DELAY_MSEC(x) (ns_to_ktime((x)*1000000U))
961 #define HR_TIMER_DELAY_NSEC(x) (ns_to_ktime(x))
963 /* Maximum number of loops polling the GPU for a cache flush before we assume it must have completed */
964 #define KBASE_CLEAN_CACHE_MAX_LOOPS 100000
965 /* Maximum number of loops polling the GPU for an AS command to complete before we assume the GPU has hung */
966 #define KBASE_AS_INACTIVE_MAX_LOOPS 100000
968 /* Maximum number of times a job can be replayed */
969 #define BASEP_JD_REPLAY_LIMIT 15
971 #endif /* _KBASE_DEFS_H_ */