/*
*
- * (C) COPYRIGHT ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
#ifndef _BASE_KERNEL_H_
#define _BASE_KERNEL_H_
-/* For now we support the legacy API as well as the new API */
-#define BASE_LEGACY_JD_API 1
+#ifndef __user
+#define __user
+#endif
+
+/* Support UK6 IOCTLS */
+#define BASE_LEGACY_UK6_SUPPORT 1
+
+/* Support UK7 IOCTLS */
+/* NB: To support UK6 we also need to support UK7 */
+#define BASE_LEGACY_UK7_SUPPORT 1
+
+/* Support UK8 IOCTLS */
+#define BASE_LEGACY_UK8_SUPPORT 1
-typedef mali_addr64 base_mem_handle;
+/* Support UK9 IOCTLS */
+#define BASE_LEGACY_UK9_SUPPORT 1
+
+typedef struct base_mem_handle {
+ struct {
+ u64 handle;
+ } basep;
+} base_mem_handle;
#include "mali_base_mem_priv.h"
#include "mali_kbase_profiling_gator_api.h"
+#include "mali_midg_coherency.h"
+#include "mali_kbase_gpu_id.h"
/*
* Dependency stuff, keep it private for now. May want to expose it if
#define BASEP_JD_SEM_MASK_IN_WORD(x) (1 << ((x) & (BASEP_JD_SEM_PER_WORD - 1)))
#define BASEP_JD_SEM_ARRAY_SIZE BASEP_JD_SEM_WORD_NR(BASE_JD_ATOM_COUNT)
-#if BASE_LEGACY_JD_API
-/* Size of the ring buffer */
-#define BASEP_JCTX_RB_NRPAGES 4
-#endif /* BASE_LEGACY_JD_API */
+/* Set/reset values for a software event */
+#define BASE_JD_SOFT_EVENT_SET ((unsigned char)1)
+#define BASE_JD_SOFT_EVENT_RESET ((unsigned char)0)
#define BASE_GPU_NUM_TEXTURE_FEATURES_REGISTERS 3
/** 32/64-bit neutral way to represent pointers */
typedef union kbase_pointer {
- void *value; /**< client should store their pointers here */
+ void __user *value; /**< client should store their pointers here */
u32 compat_value; /**< 64-bit kernels should fetch value here when handling 32-bit clients */
u64 sizer; /**< Force 64-bit storage for all clients regardless */
} kbase_pointer;
*
*/
enum {
+/* IN */
BASE_MEM_PROT_CPU_RD = (1U << 0), /**< Read access CPU side */
BASE_MEM_PROT_CPU_WR = (1U << 1), /**< Write access CPU side */
BASE_MEM_PROT_GPU_RD = (1U << 2), /**< Read access GPU side */
BASE_MEM_PROT_GPU_WR = (1U << 3), /**< Write access GPU side */
- BASE_MEM_PROT_GPU_EX = (1U << 4), /**< Execute allowed on the GPU side */
+ BASE_MEM_PROT_GPU_EX = (1U << 4), /**< Execute allowed on the GPU
+ side */
- /* Note that the HINT flags are obsolete now. If you want the memory
- * to be cached on the CPU please use the BASE_MEM_CACHED_CPU flag
+ /* BASE_MEM_HINT flags have been removed, but their values are reserved
+ * for backwards compatibility with older user-space drivers. The values
+ * can be re-used once support for r5p0 user-space drivers is removed,
+ * presumably in r7p0.
+ *
+ * RESERVED: (1U << 5)
+ * RESERVED: (1U << 6)
+ * RESERVED: (1U << 7)
+ * RESERVED: (1U << 8)
*/
- BASE_MEM_HINT_CPU_RD = (1U << 5), /**< Heavily read CPU side - OBSOLETE */
- BASE_MEM_HINT_CPU_WR = (1U << 6), /**< Heavily written CPU side - OBSOLETE */
- BASE_MEM_HINT_GPU_RD = (1U << 7), /**< Heavily read GPU side - OBSOLETE */
- BASE_MEM_HINT_GPU_WR = (1U << 8), /**< Heavily written GPU side - OBSOLETE */
-
- BASE_MEM_GROW_ON_GPF = (1U << 9), /**< Grow backing store on GPU Page Fault */
- BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer shareable */
- BASE_MEM_COHERENT_LOCAL = (1U << 11), /**< Page coherence Inner shareable */
- BASE_MEM_CACHED_CPU = (1U << 12), /**< Should be cached on the CPU */
+ BASE_MEM_GROW_ON_GPF = (1U << 9), /**< Grow backing store on GPU
+ Page Fault */
+
+ BASE_MEM_COHERENT_SYSTEM = (1U << 10), /**< Page coherence Outer
+ shareable, if available */
+ BASE_MEM_COHERENT_LOCAL = (1U << 11), /**< Page coherence Inner
+ shareable */
+ BASE_MEM_CACHED_CPU = (1U << 12), /**< Should be cached on the
+ CPU */
+
+/* IN/OUT */
+ BASE_MEM_SAME_VA = (1U << 13), /**< Must have same VA on both the GPU
+ and the CPU */
+/* OUT */
+ BASE_MEM_NEED_MMAP = (1U << 14), /**< Must call mmap to acquire a GPU
+ address for the alloc */
+/* IN */
+ BASE_MEM_COHERENT_SYSTEM_REQUIRED = (1U << 15), /**< Page coherence
+ Outer shareable, required. */
+ BASE_MEM_SECURE = (1U << 16), /**< Secure memory */
+ BASE_MEM_DONT_NEED = (1U << 17), /**< Not needed physical
+ memory */
- BASE_MEM_SAME_VA = (1U << 13) /**< Must have same VA on both the GPU and the CPU */
};
/**
- * @brief Memory types supported by @a base_mem_import
+ * @brief Number of bits used as flags for base memory management
+ *
+ * Must be kept in sync with the ::base_mem_alloc_flags flags
+ */
+#define BASE_MEM_FLAGS_NR_BITS 18
+
+/**
+ * A mask for all output bits, excluding IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_OUTPUT_MASK BASE_MEM_NEED_MMAP
+
+/**
+ * A mask for all input bits, including IN/OUT bits.
+ */
+#define BASE_MEM_FLAGS_INPUT_MASK \
+ (((1 << BASE_MEM_FLAGS_NR_BITS) - 1) & ~BASE_MEM_FLAGS_OUTPUT_MASK)
+
+/**
+ * A mask for all the flags which are modifiable via the base_mem_set_flags
+ * interface.
+ */
+#define BASE_MEM_FLAGS_MODIFIABLE \
+ (BASE_MEM_DONT_NEED | BASE_MEM_COHERENT_SYSTEM | \
+ BASE_MEM_COHERENT_LOCAL)
+
+/**
+ * enum base_mem_import_type - Memory types supported by @a base_mem_import
+ *
+ * @BASE_MEM_IMPORT_TYPE_INVALID: Invalid type
+ * @BASE_MEM_IMPORT_TYPE_UMP: UMP import. Handle type is ump_secure_id.
+ * @BASE_MEM_IMPORT_TYPE_UMM: UMM import. Handle type is a file descriptor (int)
+ * @BASE_MEM_IMPORT_TYPE_USER_BUFFER: User buffer import. Handle is a
+ * base_mem_import_user_buffer
*
* Each type defines what the supported handle type is.
*
*/
typedef enum base_mem_import_type {
BASE_MEM_IMPORT_TYPE_INVALID = 0,
- /** UMP import. Handle type is ump_secure_id. */
BASE_MEM_IMPORT_TYPE_UMP = 1,
- /** UMM import. Handle type is a file descriptor (int) */
- BASE_MEM_IMPORT_TYPE_UMM = 2
+ BASE_MEM_IMPORT_TYPE_UMM = 2,
+ BASE_MEM_IMPORT_TYPE_USER_BUFFER = 3
} base_mem_import_type;
-/* legacy API wrappers */
-#define base_tmem_import_type base_mem_import_type
-#define BASE_TMEM_IMPORT_TYPE_INVALID BASE_MEM_IMPORT_TYPE_INVALID
-#define BASE_TMEM_IMPORT_TYPE_UMP BASE_MEM_IMPORT_TYPE_UMP
-#define BASE_TMEM_IMPORT_TYPE_UMM BASE_MEM_IMPORT_TYPE_UMM
+/**
+ * struct base_mem_import_user_buffer - Handle of an imported user buffer
+ *
+ * @ptr: kbase_pointer to imported user buffer
+ * @length: length of imported user buffer in bytes
+ *
+ * This structure is used to represent a handle of an imported user buffer.
+ */
+
+struct base_mem_import_user_buffer {
+ kbase_pointer ptr;
+ u64 length;
+};
+
+/**
+ * @brief Invalid memory handle.
+ *
+ * Return value from functions returning @ref base_mem_handle on error.
+ *
+ * @warning @ref base_mem_handle_new_invalid must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
+ */
+#define BASE_MEM_INVALID_HANDLE ((base_mem_handle) { {BASEP_MEM_INVALID_HANDLE} })
/**
- * @brief Invalid memory handle type.
- * Return value from functions returning @a base_mem_handle on error.
+ * @brief Special write-alloc memory handle.
+ *
+ * A special handle is used to represent a region where a special page is mapped
+ * with a write-alloc cache setup, typically used when the write result of the
+ * GPU isn't needed, but the GPU must write anyway.
+ *
+ * @warning @ref base_mem_handle_new_write_alloc must be used instead of this macro
+ * in C++ code or other situations where compound literals cannot be used.
*/
-#define BASE_MEM_INVALID_HANDLE (0ull << 12)
+#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE ((base_mem_handle) { {BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE} })
+
+#define BASEP_MEM_INVALID_HANDLE (0ull << 12)
#define BASE_MEM_MMU_DUMP_HANDLE (1ull << 12)
#define BASE_MEM_TRACE_BUFFER_HANDLE (2ull << 12)
#define BASE_MEM_MAP_TRACKING_HANDLE (3ull << 12)
-#define BASE_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
+#define BASEP_MEM_WRITE_ALLOC_PAGES_HANDLE (4ull << 12)
/* reserved handles ..-64<<PAGE_SHIFT> for future special handles */
#define BASE_MEM_COOKIE_BASE (64ul << 12)
#define BASE_MEM_FIRST_FREE_ADDRESS ((BITS_PER_LONG << 12) + \
BASE_MEM_COOKIE_BASE)
+/* Mask to detect 4GB boundary alignment */
+#define BASE_MEM_MASK_4GB 0xfffff000UL
+
+
/* Bit mask of cookies used for for memory allocation setup */
#define KBASE_COOKIE_MASK ~1UL /* bit 0 is reserved */
-/**
- * @brief Number of bits used as flags for base memory management
- *
- * Must be kept in sync with the ::base_mem_alloc_flags flags
- */
-#define BASE_MEM_FLAGS_NR_BITS 14
/**
* @brief Result codes of changing the size of the backing store allocated to a tmem region
* by the accessors.
*/
typedef struct base_syncset {
- basep_syncset basep_sset;
+ struct basep_syncset basep_sset;
} base_syncset;
/** @} end group base_user_api_memory_defered */
*/
typedef struct base_import_handle {
struct {
- mali_addr64 handle;
+ u64 handle;
} basep;
} base_import_handle;
} basep;
} base_fence;
-#if BASE_LEGACY_JD_API
-/**
- * @brief A pre- or post- dual dependency.
- *
- * This structure is used to express either
- * @li a single or dual pre-dependency (a job depending on one or two
- * other jobs),
- * @li a single or dual post-dependency (a job resolving a dependency
- * for one or two other jobs).
- *
- * The dependency itself is specified as a u8, where 0 indicates no
- * dependency. A single dependency is expressed by having one of the
- * dependencies set to 0.
- */
-typedef struct base_jd_dep {
- u8 dep[2]; /**< pre/post dependencies */
-} base_jd_dep;
-#endif /* BASE_LEGACY_JD_API */
-
/**
* @brief Per-job data
*
u64 length;
};
+/**
+ * struct base_jit_alloc_info - Structure which describes a JIT allocation
+ * request.
+ * @gpu_alloc_addr: The GPU virtual address to write the JIT
+ * allocated GPU virtual address to.
+ * @va_pages: The minimum number of virtual pages required.
+ * @commit_pages: The minimum number of physical pages which
+ * should back the allocation.
+ * @extent: Granularity of physical pages to grow the
+ * allocation by during a fault.
+ * @id: Unique ID provided by the caller, this is used
+ * to pair allocation and free requests.
+ * Zero is not a valid value.
+ */
+struct base_jit_alloc_info {
+ u64 gpu_alloc_addr;
+ u64 va_pages;
+ u64 commit_pages;
+ u64 extent;
+ u8 id;
+};
+
+/**
+ * @brief Job dependency type.
+ *
+ * A flags field will be inserted into the atom structure to specify whether a dependency is a data or
+ * ordering dependency (by putting it before/after 'core_req' in the structure it should be possible to add without
+ * changing the structure size).
+ * When the flag is set for a particular dependency to signal that it is an ordering only dependency then
+ * errors will not be propagated.
+ */
+typedef u8 base_jd_dep_type;
+
+
+#define BASE_JD_DEP_TYPE_INVALID (0) /**< Invalid dependency */
+#define BASE_JD_DEP_TYPE_DATA (1U << 0) /**< Data dependency */
+#define BASE_JD_DEP_TYPE_ORDER (1U << 1) /**< Order dependency */
+
/**
* @brief Job chain hardware requirements.
*
/* Requires fragment job with AFBC encoding */
#define BASE_JD_REQ_FS_AFBC (1U << 13)
+/**
+ * SW-only requirement: coalesce completion events.
+ * If this bit is set then completion of this atom will not cause an event to
+ * be sent to userspace, whether successful or not; completion events will be
+ * deferred until an atom completes which does not have this bit set.
+ */
+#define BASE_JD_REQ_EVENT_COALESCE (1U << 5)
+
/**
* SW Only requirement: the job chain requires a coherent core group. We don't
* mind which coherent core group is used.
* - Priority is inherited from the replay job.
*/
#define BASE_JD_REQ_SOFT_REPLAY (BASE_JD_REQ_SOFT_JOB | 0x4)
+/**
+ * SW only requirement: event wait/trigger job.
+ *
+ * - BASE_JD_REQ_SOFT_EVENT_WAIT: this job will block until the event is set.
+ * - BASE_JD_REQ_SOFT_EVENT_SET: this job sets the event, thus unblocks the
+ * other waiting jobs. It completes immediately.
+ * - BASE_JD_REQ_SOFT_EVENT_RESET: this job resets the event, making it
+ * possible for other jobs to wait upon. It completes immediately.
+ */
+#define BASE_JD_REQ_SOFT_EVENT_WAIT (BASE_JD_REQ_SOFT_JOB | 0x5)
+#define BASE_JD_REQ_SOFT_EVENT_SET (BASE_JD_REQ_SOFT_JOB | 0x6)
+#define BASE_JD_REQ_SOFT_EVENT_RESET (BASE_JD_REQ_SOFT_JOB | 0x7)
+
+#define BASE_JD_REQ_SOFT_DEBUG_COPY (BASE_JD_REQ_SOFT_JOB | 0x8)
+
+/**
+ * SW only requirement: Just In Time allocation
+ *
+ * This job requests a JIT allocation based on the request in the
+ * @base_jit_alloc_info structure which is passed via the jc element of
+ * the atom.
+ *
+ * It should be noted that the id entry in @base_jit_alloc_info must not
+ * be reused until it has been released via @BASE_JD_REQ_SOFT_JIT_FREE.
+ *
+ * Should this soft job fail it is expected that a @BASE_JD_REQ_SOFT_JIT_FREE
+ * soft job to free the JIT allocation is still made.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_ALLOC (BASE_JD_REQ_SOFT_JOB | 0x9)
+/**
+ * SW only requirement: Just In Time free
+ *
+ * This job requests a JIT allocation created by @BASE_JD_REQ_SOFT_JIT_ALLOC
+ * to be freed. The ID of the JIT allocation is passed via the jc element of
+ * the atom.
+ *
+ * The job will complete immediately.
+ */
+#define BASE_JD_REQ_SOFT_JIT_FREE (BASE_JD_REQ_SOFT_JOB | 0xa)
+
+/**
+ * SW only requirement: Map external resource
+ *
+ * This job requests external resource(s) are mapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_MAP (BASE_JD_REQ_SOFT_JOB | 0xb)
+/**
+ * SW only requirement: Unmap external resource
+ *
+ * This job requests external resource(s) are unmapped once the dependencies
+ * of the job have been satisfied. The list of external resources is
+ * passed via the jc element of the atom which is a pointer to a
+ * @base_external_resource_list.
+ */
+#define BASE_JD_REQ_SOFT_EXT_RES_UNMAP (BASE_JD_REQ_SOFT_JOB | 0xc)
/**
* HW Requirement: Requires Compute shaders (but not Vertex or Geometry Shaders)
*
* In contrast to @ref BASE_JD_REQ_CS, this does \b not indicate that the Job
* Chain contains 'Geometry Shader' or 'Vertex Shader' jobs.
- *
- * @note This is a more flexible variant of the @ref BASE_CONTEXT_HINT_ONLY_COMPUTE flag,
- * allowing specific jobs to be marked as 'Only Compute' instead of the entire context
*/
#define BASE_JD_REQ_ONLY_COMPUTE (1U << 10)
*
* This is only guaranteed to work for BASE_JD_REQ_ONLY_COMPUTE atoms.
*
- * If the core availability policy is keeping the required core group turned off, then
+ * If the core availability policy is keeping the required core group turned off, then
* the job will fail with a BASE_JD_EVENT_PM_EVENT error code.
*/
#define BASE_JD_REQ_SPECIFIC_COHERENT_GROUP (1U << 11)
#define BASEP_JD_REQ_EVENT_NEVER (1U << 14)
/**
-* These requirement bits are currently unused in base_jd_core_req (currently a u16)
-*/
-
-#define BASEP_JD_REQ_RESERVED_BIT5 (1U << 5)
-#define BASEP_JD_REQ_RESERVED_BIT15 (1U << 15)
-
-/**
-* Mask of all the currently unused requirement bits in base_jd_core_req.
-*/
+ * These requirement bits are currently unused in base_jd_core_req (currently a u16)
+ */
-#define BASEP_JD_REQ_RESERVED (BASEP_JD_REQ_RESERVED_BIT5 | \
- BASEP_JD_REQ_RESERVED_BIT15)
+#define BASEP_JD_REQ_RESERVED (1U << 15)
/**
* Mask of all bits in base_jd_core_req that control the type of the atom.
*
* This allows dependency only atoms to have flags set
*/
-#define BASEP_JD_REQ_ATOM_TYPE (~(BASEP_JD_REQ_RESERVED | BASE_JD_REQ_EVENT_ONLY_ON_FAILURE |\
- BASE_JD_REQ_EXTERNAL_RESOURCES | BASEP_JD_REQ_EVENT_NEVER))
-
-#if BASE_LEGACY_JD_API
-/**
- * @brief A single job chain, with pre/post dependendencies and mem ops
- *
- * This structure is used to describe a single job-chain to be submitted
- * as part of a bag.
- * It contains all the necessary information for Base to take care of this
- * job-chain, including core requirements, priority, syncsets and
- * dependencies.
- */
-typedef struct base_jd_atom {
- mali_addr64 jc; /**< job-chain GPU address */
- base_jd_udata udata; /**< user data */
- base_jd_dep pre_dep; /**< pre-dependencies */
- base_jd_dep post_dep; /**< post-dependencies */
- base_jd_core_req core_req; /**< core requirements */
- u16 nr_syncsets; /**< nr of syncsets following the atom */
- u16 nr_extres; /**< nr of external resources following the atom */
-
- /** @brief Relative priority.
- *
- * A positive value requests a lower priority, whilst a negative value
- * requests a higher priority. Only privileged processes may request a
- * higher priority. For unprivileged processes, a negative priority will
- * be interpreted as zero.
- */
- s8 prio;
+#define BASEP_JD_REQ_ATOM_TYPE (~(BASEP_JD_REQ_RESERVED |\
+ BASE_JD_REQ_EVENT_ONLY_ON_FAILURE |\
+ BASE_JD_REQ_EXTERNAL_RESOURCES |\
+ BASEP_JD_REQ_EVENT_NEVER |\
+ BASE_JD_REQ_EVENT_COALESCE))
+
+/**
+ * @brief States to model state machine processed by kbasep_js_job_check_ref_cores(), which
+ * handles retaining cores for power management and affinity management.
+ *
+ * The state @ref KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY prevents an attack
+ * where lots of atoms could be submitted before powerup, and each has an
+ * affinity chosen that causes other atoms to have an affinity
+ * violation. Whilst the affinity was not causing violations at the time it
+ * was chosen, it could cause violations thereafter. For example, 1000 jobs
+ * could have had their affinity chosen during the powerup time, so any of
+ * those 1000 jobs could cause an affinity violation later on.
+ *
+ * The attack would otherwise occur because other atoms/contexts have to wait for:
+ * -# the currently running atoms (which are causing the violation) to
+ * finish
+ * -# and, the atoms that had their affinity chosen during powerup to
+ * finish. These are run preferentially because they don't cause a
+ * violation, but instead continue to cause the violation in others.
+ * -# or, the attacker is scheduled out (which might not happen for just 2
+ * contexts)
+ *
+ * By re-choosing the affinity (which is designed to avoid violations at the
+ * time it's chosen), we break condition (2) of the wait, which minimizes the
+ * problem to just waiting for current jobs to finish (which can be bounded if
+ * the Job Scheduling Policy has a timer).
+ */
+enum kbase_atom_coreref_state {
+ /** Starting state: No affinity chosen, and cores must be requested. kbase_jd_atom::affinity==0 */
+ KBASE_ATOM_COREREF_STATE_NO_CORES_REQUESTED,
+ /** Cores requested, but waiting for them to be powered. Requested cores given by kbase_jd_atom::affinity */
+ KBASE_ATOM_COREREF_STATE_WAITING_FOR_REQUESTED_CORES,
+ /** Cores given by kbase_jd_atom::affinity are powered, but affinity might be out-of-date, so must recheck */
+ KBASE_ATOM_COREREF_STATE_RECHECK_AFFINITY,
+ /** Cores given by kbase_jd_atom::affinity are powered, and affinity is up-to-date, but must check for violations */
+ KBASE_ATOM_COREREF_STATE_CHECK_AFFINITY_VIOLATIONS,
+ /** Cores are powered, kbase_jd_atom::affinity up-to-date, no affinity violations: atom can be submitted to HW */
+ KBASE_ATOM_COREREF_STATE_READY
+};
- /**
- * @brief Device number to use, depending on @ref base_jd_core_req flags set.
- *
- * When BASE_JD_REQ_SPECIFIC_COHERENT_GROUP is set, a 'device' is one of
- * the coherent core groups, and so this targets a particular coherent
- * core-group. They are numbered from 0 to (mali_base_gpu_coherent_group_info::num_groups - 1),
- * and the cores targeted by this device_nr will usually be those specified by
- * (mali_base_gpu_coherent_group_info::group[device_nr].core_mask).
- * Further, two atoms from different processes using the same \a device_nr
- * at the same time will always target the same coherent core-group.
- *
- * There are exceptions to when the device_nr is ignored:
- * - when any process in the system uses a BASE_JD_REQ_CS or
- * BASE_JD_REQ_ONLY_COMPUTE atom that can run on all cores across all
- * coherency groups (i.e. also does \b not have the
- * BASE_JD_REQ_COHERENT_GROUP or BASE_JD_REQ_SPECIFIC_COHERENT_GROUP flags
- * set). In this case, such atoms would block device_nr==1 being used due
- * to restrictions on affinity, perhaps indefinitely. To ensure progress is
- * made, the atoms targeted for device_nr 1 will instead be redirected to
- * device_nr 0
- * - During certain HW workarounds, such as BASE_HW_ISSUE_8987, where
- * BASE_JD_REQ_ONLY_COMPUTE atoms must not use the same cores as other
- * atoms. In this case, all atoms are targeted to device_nr == min( num_groups, 1 )
- *
- * Note that the 'device' number for a coherent coregroup cannot exceed
- * (BASE_MAX_COHERENT_GROUPS - 1).
- */
- u8 device_nr;
-} base_jd_atom;
-#endif /* BASE_LEGACY_JD_API */
+/*
+ * Base Atom priority
+ *
+ * Only certain priority levels are actually implemented, as specified by the
+ * BASE_JD_PRIO_<...> definitions below. It is undefined to use a priority
+ * level that is not one of those defined below.
+ *
+ * Priority levels only affect scheduling between atoms of the same type within
+ * a base context, and only after the atoms have had dependencies resolved.
+ * Fragment atoms do not affect non-fragment atoms with lower priorities, and
+ * the other way around. For example, a low priority atom that has had its
+ * dependencies resolved might run before a higher priority atom that has not
+ * had its dependencies resolved.
+ *
+ * The scheduling between base contexts/processes and between atoms from
+ * different base contexts/processes is unaffected by atom priority.
+ *
+ * The atoms are scheduled as follows with respect to their priorities:
+ * - Let atoms 'X' and 'Y' be for the same job slot that have dependencies
+ * resolved, and atom 'X' has a higher priority than atom 'Y'
+ * - If atom 'Y' is currently running on the HW, then it is interrupted to
+ * allow atom 'X' to run soon after
+ * - If instead neither atom 'Y' nor atom 'X' are running, then when choosing
+ * the next atom to run, atom 'X' will always be chosen instead of atom 'Y'
+ * - Any two atoms that have the same priority could run in any order with
+ * respect to each other. That is, there is no ordering constraint between
+ * atoms of the same priority.
+ */
+typedef u8 base_jd_prio;
+
+/* Medium atom priority. This is a priority higher than BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_MEDIUM ((base_jd_prio)0)
+/* High atom priority. This is a priority higher than BASE_JD_PRIO_MEDIUM and
+ * BASE_JD_PRIO_LOW */
+#define BASE_JD_PRIO_HIGH ((base_jd_prio)1)
+/* Low atom priority. */
+#define BASE_JD_PRIO_LOW ((base_jd_prio)2)
+
+/* Count of the number of priority levels. This itself is not a valid
+ * base_jd_prio setting */
+#define BASE_JD_NR_PRIO_LEVELS 3
+
+enum kbase_jd_atom_state {
+ /** Atom is not used */
+ KBASE_JD_ATOM_STATE_UNUSED,
+ /** Atom is queued in JD */
+ KBASE_JD_ATOM_STATE_QUEUED,
+ /** Atom has been given to JS (is runnable/running) */
+ KBASE_JD_ATOM_STATE_IN_JS,
+ /** Atom has been completed, but not yet handed back to job dispatcher
+ * for dependency resolution */
+ KBASE_JD_ATOM_STATE_HW_COMPLETED,
+ /** Atom has been completed, but not yet handed back to userspace */
+ KBASE_JD_ATOM_STATE_COMPLETED
+};
typedef u8 base_atom_id; /**< Type big enough to store an atom number in */
+struct base_dependency {
+ base_atom_id atom_id; /**< An atom number */
+ base_jd_dep_type dependency_type; /**< Dependency type */
+};
+
typedef struct base_jd_atom_v2 {
- mali_addr64 jc; /**< job-chain GPU address */
- base_jd_udata udata; /**< user data */
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
kbase_pointer extres_list; /**< list of external resources */
u16 nr_extres; /**< nr of external resources */
base_jd_core_req core_req; /**< core requirements */
- base_atom_id pre_dep[2]; /**< pre-dependencies */
+ struct base_dependency pre_dep[2]; /**< pre-dependencies, one needs to use the SETTER function to assign this field,
+ this is done in order to reduce the possibility of improper assignment of a dependency field */
base_atom_id atom_number; /**< unique number to identify the atom */
- s8 prio; /**< priority - smaller is higher priority */
+ base_jd_prio prio; /**< Atom priority. Refer to @ref base_jd_prio for more details */
u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
- u8 padding[7];
+ u8 padding[5];
} base_jd_atom_v2;
-#if BASE_LEGACY_JD_API
-/* Structure definition works around the fact that C89 doesn't allow arrays of size 0 */
-typedef struct basep_jd_atom_ss {
- base_jd_atom atom;
- base_syncset syncsets[1];
-} basep_jd_atom_ss;
-#endif /* BASE_LEGACY_JD_API */
+#ifdef BASE_LEGACY_UK6_SUPPORT
+struct base_jd_atom_v2_uk6 {
+ u64 jc; /**< job-chain GPU address */
+ struct base_jd_udata udata; /**< user data */
+ kbase_pointer extres_list; /**< list of external resources */
+ u16 nr_extres; /**< nr of external resources */
+ base_jd_core_req core_req; /**< core requirements */
+ base_atom_id pre_dep[2]; /**< pre-dependencies */
+ base_atom_id atom_number; /**< unique number to identify the atom */
+ base_jd_prio prio; /**< priority - smaller is higher priority */
+ u8 device_nr; /**< coregroup when BASE_JD_REQ_SPECIFIC_COHERENT_GROUP specified */
+ u8 padding[7];
+};
+#endif
typedef enum base_external_resource_access {
BASE_EXT_RES_ACCESS_SHARED,
u64 ext_resource;
} base_external_resource;
-#if BASE_LEGACY_JD_API
-/* Structure definition works around the fact that C89 doesn't allow arrays of size 0 */
-typedef struct basep_jd_atom_ext_res {
- base_jd_atom atom;
- base_external_resource resources[1];
-} basep_jd_atom_ext_res;
-static INLINE size_t base_jd_atom_size_ex(u32 syncset_count, u32 external_res_count)
-{
- int size;
-
- LOCAL_ASSERT(0 == syncset_count || 0 == external_res_count);
+/**
+ * The maximum number of external resources which can be mapped/unmapped
+ * in a single request.
+ */
+#define BASE_EXT_RES_COUNT_MAX 10
- size = syncset_count ? offsetof(basep_jd_atom_ss, syncsets[0]) + (sizeof(base_syncset) * syncset_count) : external_res_count ? offsetof(basep_jd_atom_ext_res, resources[0]) + (sizeof(base_external_resource) * external_res_count) : sizeof(base_jd_atom);
+/**
+ * struct base_external_resource_list - Structure which describes a list of
+ * external resources.
+ * @count: The number of resources.
+ * @ext_res: Array of external resources which is
+ * sized at allocation time.
+ */
+struct base_external_resource_list {
+ u64 count;
+ struct base_external_resource ext_res[1];
+};
- /* Atom minimum size set to 64 bytes to ensure that the maximum
- * number of atoms in the ring buffer is limited to 256 */
- return MAX(64, size);
-}
+struct base_jd_debug_copy_buffer {
+ u64 address;
+ u64 size;
+};
/**
- * @brief Atom size evaluator
+ * @brief Setter for a dependency structure
*
- * This function returns the size in bytes of a ::base_jd_atom
- * containing @a n syncsets. It must be used to compute the size of a
- * bag before allocation.
+ * @param[in] dep The kbase jd atom dependency to be initialized.
+ * @param id The atom_id to be assigned.
+ * @param dep_type The dep_type to be assigned.
*
- * @param nr the number of syncsets for this atom
- * @return the atom size in bytes
*/
-static INLINE size_t base_jd_atom_size(u32 nr)
+static inline void base_jd_atom_dep_set(struct base_dependency *dep,
+ base_atom_id id, base_jd_dep_type dep_type)
{
- return base_jd_atom_size_ex(nr, 0);
+ LOCAL_ASSERT(dep != NULL);
+
+ /*
+ * make sure we don't set not allowed combinations
+ * of atom_id/dependency_type.
+ */
+ LOCAL_ASSERT((id == 0 && dep_type == BASE_JD_DEP_TYPE_INVALID) ||
+ (id > 0 && dep_type != BASE_JD_DEP_TYPE_INVALID));
+
+ dep->atom_id = id;
+ dep->dependency_type = dep_type;
}
/**
- * @brief Atom syncset accessor
+ * @brief Make a copy of a dependency structure
*
- * This function returns a pointer to the nth syncset allocated
- * together with an atom.
+ * @param[in,out] dep The kbase jd atom dependency to be written.
+ * @param[in] from The dependency to make a copy from.
*
- * @param[in] atom The allocated atom
- * @param n The number of the syncset to be returned
- * @return a pointer to the nth syncset.
*/
-static INLINE base_syncset *base_jd_get_atom_syncset(base_jd_atom *atom, u16 n)
+static inline void base_jd_atom_dep_copy(struct base_dependency *dep,
+ const struct base_dependency *from)
{
- LOCAL_ASSERT(atom != NULL);
- LOCAL_ASSERT(0 == (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES));
- LOCAL_ASSERT(n <= atom->nr_syncsets);
- return &((basep_jd_atom_ss *) atom)->syncsets[n];
+ LOCAL_ASSERT(dep != NULL);
+
+ base_jd_atom_dep_set(dep, from->atom_id, from->dependency_type);
}
-#endif /* BASE_LEGACY_JD_API */
/**
* @brief Soft-atom fence trigger setup.
* @param[out] atom A pre-allocated atom to configure as a fence trigger SW atom
* @param[in] fence The base fence object to trigger.
*/
-static INLINE void base_jd_fence_trigger_setup(base_jd_atom * const atom, base_fence *fence)
-{
- LOCAL_ASSERT(atom);
- LOCAL_ASSERT(fence);
- LOCAL_ASSERT(fence->basep.fd == INVALID_PLATFORM_FENCE);
- LOCAL_ASSERT(fence->basep.stream_fd >= 0);
- atom->jc = (uintptr_t) fence;
- atom->core_req = BASE_JD_REQ_SOFT_FENCE_TRIGGER;
-}
-
-static INLINE void base_jd_fence_trigger_setup_v2(base_jd_atom_v2 *atom, base_fence *fence)
+static inline void base_jd_fence_trigger_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
LOCAL_ASSERT(atom);
LOCAL_ASSERT(fence);
* @param[out] atom A pre-allocated atom to configure as a fence wait SW atom
* @param[in] fence The base fence object to wait on
*/
-static INLINE void base_jd_fence_wait_setup(base_jd_atom * const atom, base_fence *fence)
-{
- LOCAL_ASSERT(atom);
- LOCAL_ASSERT(fence);
- LOCAL_ASSERT(fence->basep.fd >= 0);
- atom->jc = (uintptr_t) fence;
- atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
-}
-
-static INLINE void base_jd_fence_wait_setup_v2(base_jd_atom_v2 *atom, base_fence *fence)
+static inline void base_jd_fence_wait_setup_v2(struct base_jd_atom_v2 *atom, struct base_fence *fence)
{
LOCAL_ASSERT(atom);
LOCAL_ASSERT(fence);
atom->core_req = BASE_JD_REQ_SOFT_FENCE_WAIT;
}
-#if BASE_LEGACY_JD_API
-/**
- * @brief Atom external resource accessor
- *
- * This functions returns a pointer to the nth external resource tracked by the atom.
- *
- * @param[in] atom The allocated atom
- * @param n The number of the external resource to return a pointer to
- * @return a pointer to the nth external resource
- */
-static INLINE base_external_resource * base_jd_get_external_resource(base_jd_atom *atom, u16 n)
-{
- LOCAL_ASSERT(atom != NULL);
- LOCAL_ASSERT(BASE_JD_REQ_EXTERNAL_RESOURCES == (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES));
- LOCAL_ASSERT(n <= atom->nr_extres);
- return &((basep_jd_atom_ext_res *) atom)->resources[n];
-}
-#endif /* BASE_LEGACY_JD_API */
-
/**
* @brief External resource info initialization.
*
- * Sets up a external resource object to reference
+ * Sets up an external resource object to reference
* a memory allocation and the type of access requested.
*
* @param[in] res The resource object to initialize
- * @param handle The handle to the imported memory object
+ * @param handle The handle to the imported memory object, must be
+ * obtained by calling @ref base_mem_as_import_handle().
* @param access The type of access requested
*/
-static INLINE void base_external_resource_init(base_external_resource * res, base_import_handle handle, base_external_resource_access access)
+static inline void base_external_resource_init(struct base_external_resource *res, struct base_import_handle handle, base_external_resource_access access)
{
- mali_addr64 address;
+ u64 address;
+
address = handle.basep.handle;
LOCAL_ASSERT(res != NULL);
res->ext_resource = address | (access & LOCAL_PAGE_LSB);
}
-#if BASE_LEGACY_JD_API
-/**
- * @brief Next atom accessor
- *
- * This function returns a pointer to the next allocated atom. It
- * relies on the fact that the current atom has been correctly
- * initialized (relies on the base_jd_atom::nr_syncsets field).
- *
- * @param[in] atom The allocated atom
- * @return a pointer to the next atom.
- */
-static INLINE base_jd_atom *base_jd_get_next_atom(base_jd_atom *atom)
-{
- LOCAL_ASSERT(atom != NULL);
- return (atom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) ? (base_jd_atom *) base_jd_get_external_resource(atom, atom->nr_extres) : (base_jd_atom *) base_jd_get_atom_syncset(atom, atom->nr_syncsets);
-}
-#endif /* BASE_LEGACY_JD_API */
-
/**
* @brief Job chain event code bits
* Defines the bits used to create ::base_jd_event_code
BASE_JD_EVENT_ACCESS_FLAG = 0xD8,
/* SW defined exceptions */
- BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
- BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
- BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
- BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
- BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+ BASE_JD_EVENT_MEM_GROWTH_FAILED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x000,
+ BASE_JD_EVENT_TIMED_OUT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x001,
+ BASE_JD_EVENT_JOB_CANCELLED = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x002,
+ BASE_JD_EVENT_JOB_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x003,
+ BASE_JD_EVENT_PM_EVENT = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x004,
+ BASE_JD_EVENT_FORCE_REPLAY = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_JOB | 0x005,
- BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
+ BASE_JD_EVENT_BAG_INVALID = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_BAG | 0x003,
/** End of HW fault and SW Error status codes */
BASE_JD_EVENT_RANGE_HW_FAULT_OR_SW_ERROR_END = BASE_JD_SW_EVENT | BASE_JD_SW_EVENT_RESERVED | 0x3FF,
* been completed (ie all contained job-chains have been completed).
* @li ::BASE_JD_SW_EVENT_INFO : base_jd_event::data not used
*/
-#if BASE_LEGACY_JD_API
-typedef struct base_jd_event {
- base_jd_event_code event_code; /**< event code */
- void *data; /**< event specific data */
-} base_jd_event;
-#endif
-
typedef struct base_jd_event_v2 {
- base_jd_event_code event_code; /**< event code */
- base_atom_id atom_number; /**< the atom number that has completed */
- base_jd_udata udata; /**< user data */
+ base_jd_event_code event_code; /**< event code */
+ base_atom_id atom_number; /**< the atom number that has completed */
+ struct base_jd_udata udata; /**< user data */
} base_jd_event_v2;
/**
- * Padding required to ensure that the @ref base_dump_cpu_gpu_counters structure fills
+ * Padding required to ensure that the @ref struct base_dump_cpu_gpu_counters structure fills
* a full cache line.
*/
/** @} end group base_user_api_job_dispatch */
-#ifdef __KERNEL__
-/*
- * The following typedefs should be removed when a midg types header is added.
- * See MIDCOM-1657 for details.
- */
-typedef u32 midg_product_id;
-typedef u32 midg_cache_features;
-typedef u32 midg_tiler_features;
-typedef u32 midg_mem_features;
-typedef u32 midg_mmu_features;
-typedef u32 midg_js_features;
-typedef u32 midg_as_present;
-typedef u32 midg_js_present;
-
-#define MIDG_MAX_JOB_SLOTS 16
-
-#else
-#include <midg/mali_midg.h>
-#endif
+#define GPU_MAX_JOB_SLOTS 16
/**
* @page page_base_user_api_gpuprops User-side Base GPU Property Query API
*
* There is a related third module outside of Base, which is owned by the MIDG
* module:
- * - @ref midg_gpuprops_static "Midgard Compile-time GPU Properties"
+ * - @ref gpu_props_static "Midgard Compile-time GPU Properties"
*
* Base only deals with properties that vary between different Midgard
* implementations - the Dynamic GPU properties and the Platform Config
* Dynamic GPU properties are presented in two sets:
* -# the commonly used properties in @ref base_gpu_props, which have been
* unpacked from GPU register bitfields.
- * -# The full set of raw, unprocessed properties in @ref midg_raw_gpu_props
+ * -# The full set of raw, unprocessed properties in @ref gpu_raw_gpu_props
* (also a member of @ref base_gpu_props). All of these are presented in
* the packed form, as presented by the GPU registers themselves.
*
- * @usecase The raw properties in @ref midg_raw_gpu_props are necessary to
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
* allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
* behaving differently?". In this case, all information about the
* configuration is potentially useful, but it <b>does not need to be processed
size="6,6"
"mali_base.h";
- "midg/midg.h";
+ "gpu/mali_gpu.h";
node [ shape=box ];
{
rank = same; ordering = out;
- "midg/midg_gpu_props.h";
+ "gpu/mali_gpu_props.h";
"base/midg_gpus/mali_t600.h";
"base/midg_gpus/other_midg_gpu.h";
}
{ rank = same; "plat/plat_config.h"; }
{
rank = same;
- "midg/midg.h" [ shape=box ];
+ "gpu/mali_gpu.h" [ shape=box ];
gpu_chooser [ label="" style="invisible" width=0 height=0 fixedsize=true ];
select_gpu [ label="Mali-T600 | Other\n(select_gpu.h)" shape=polygon,sides=4,distortion=0.25 width=3.3 height=0.99 fixedsize=true ] ;
}
{ rank = same; "plat/plat_config.h"; }
{ rank = same; "mali_base.h"; }
- "mali_base.h" -> "midg/midg.h" -> "midg/midg_gpu_props.h";
+ "mali_base.h" -> "gpu/mali_gpu.h" -> "gpu/mali_gpu_props.h";
"mali_base.h" -> "plat/plat_config.h" ;
"mali_base.h" -> select_gpu ;
* @section sec_base_user_api_gpuprops_cocalc Coherency Group calculation
* Creation of the coherent group data is done at device-driver startup, and so
* is one-time. This will most likely involve a loop with CLZ, shifting, and
- * bit clearing on the L2_PRESENT or L3_PRESENT masks, depending on whether the
- * system is L2 or L2+L3 Coherent. The number of shader cores is done by a
+ * bit clearing on the L2_PRESENT mask, depending on whether the
+ * system is L2 Coherent. The number of shader cores is done by a
* population count, since faulty cores may be disabled during production,
* producing a non-contiguous mask.
*
 * The memory requirements for this algorithm can be determined either by a u64
- * population count on the L2/L3_PRESENT masks (a LUT helper already is
+ * population count on the L2_PRESENT mask (a LUT helper already is
 * required for the above), or simple assumption that there can be no more than
* 16 coherent groups, since core groups are typically 4 cores.
*/
/**
* Product specific value.
*/
- midg_product_id product_id;
+ u32 product_id;
/**
* Status of the GPU release.
u8 padding[5];
};
-struct mali_base_gpu_l3_cache_props {
- u8 log2_line_size;
- u8 log2_cache_size;
- u8 padding[6];
-};
-
struct mali_base_gpu_tiler_props {
u32 bin_size_bytes; /* Max is 4*2^15 */
u32 max_active_levels; /* Max is 2^15 */
};
/**
- * GPU threading system details.
+ * GPU threading system details.
*/
struct mali_base_gpu_thread_props {
- u32 max_threads; /* Max. number of threads per core */
+ u32 max_threads; /* Max. number of threads per core */
u32 max_workgroup_size; /* Max. number of threads per workgroup */
u32 max_barrier_size; /* Max. number of threads that can synchronize on a simple barrier */
- u16 max_registers; /* Total size [1..65535] of the register file available per core. */
- u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
- u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
- u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
+ u16 max_registers; /* Total size [1..65535] of the register file available per core. */
+ u8 max_task_queue; /* Max. tasks [1..255] which may be sent to a core before it becomes blocked. */
+ u8 max_thread_group_split; /* Max. allowed value [1..15] of the Thread Group Split field. */
+ u8 impl_tech; /* 0 = Not specified, 1 = Silicon, 2 = FPGA, 3 = SW Model/Emulation */
u8 padding[7];
};
* provides a cached population-count for that mask.
*
* @note Whilst all cores are exposed in the mask, not all may be available to
- * the application, depending on the Kernel Job Scheduler policy. Therefore,
- * the application should not further restrict the core mask itself, as it may
- * result in an empty core mask. However, it can guarentee that there will be
- * at least one core available for each core group exposed .
- *
- * @usecase Chains marked at certain user-side priorities (e.g. the Long-running
- * (batch) priority ) can be prevented from running on entire core groups by the
- * Kernel Chain Scheduler policy.
+ * the application, depending on the Kernel Power policy.
*
* @note if u64s must be 8-byte aligned, then this structure has 32-bits of wastage.
*/
u32 num_core_groups;
/**
- * Coherency features of the memory, accessed by @ref midg_mem_features
+ * Coherency features of the memory, accessed by @ref gpu_mem_features
* methods
*/
- midg_mem_features coherency;
+ u32 coherency;
u32 padding;
* the values should be better expressed in an unpacked form in the
* base_gpu_props structure.
*
- * @usecase The raw properties in @ref midg_raw_gpu_props are necessary to
+ * @usecase The raw properties in @ref gpu_raw_gpu_props are necessary to
* allow a user of the Mali Tools (e.g. PAT) to determine "Why is this device
* behaving differently?". In this case, all information about the
* configuration is potentially useful, but it <b>does not need to be processed
* Tools software on the host PC.
*
*/
-struct midg_raw_gpu_props {
+struct gpu_raw_gpu_props {
u64 shader_present;
u64 tiler_present;
u64 l2_present;
- u64 l3_present;
+ u64 unused_1; /* keep for backward compatibility */
- midg_cache_features l2_features;
- midg_cache_features l3_features;
- midg_mem_features mem_features;
- midg_mmu_features mmu_features;
+ u32 l2_features;
+ u32 suspend_size; /* API 8.2+ */
+ u32 mem_features;
+ u32 mmu_features;
- midg_as_present as_present;
+ u32 as_present;
u32 js_present;
- midg_js_features js_features[MIDG_MAX_JOB_SLOTS];
- midg_tiler_features tiler_features;
+ u32 js_features[GPU_MAX_JOB_SLOTS];
+ u32 tiler_features;
u32 texture_features[3];
u32 gpu_id;
-
+
u32 thread_max_threads;
u32 thread_max_workgroup_size;
u32 thread_max_barrier_size;
u32 thread_features;
- u32 padding;
+ /*
+ * Note: This is the _selected_ coherency mode rather than the
+ * available modes as exposed in the coherency_features register.
+ */
+ u32 coherency_mode;
};
/**
typedef struct mali_base_gpu_props {
struct mali_base_gpu_core_props core_props;
struct mali_base_gpu_l2_cache_props l2_props;
- struct mali_base_gpu_l3_cache_props l3_props;
+	u64 unused_1;		/* keep for backward compatibility */
struct mali_base_gpu_tiler_props tiler_props;
struct mali_base_gpu_thread_props thread_props;
/** This member is large, likely to be 128 bytes */
- struct midg_raw_gpu_props raw_props;
+ struct gpu_raw_gpu_props raw_props;
/** This must be last member of the structure */
struct mali_base_gpu_coherent_group_info coherency_info;
/** Base context is a 'System Monitor' context for Hardware counters.
*
* One important side effect of this is that job submission is disabled. */
- BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1),
-
- /** Base context flag indicating a 'hint' that this context uses Compute
- * Jobs only.
- *
- * Specifially, this means that it only sends atoms that <b>do not</b>
- * contain the following @ref base_jd_core_req :
- * - BASE_JD_REQ_FS
- * - BASE_JD_REQ_T
- *
- * Violation of these requirements will cause the Job-Chains to be rejected.
- *
- * In addition, it is inadvisable for the atom's Job-Chains to contain Jobs
- * of the following @ref midg_job_type (whilst it may work now, it may not
- * work in future) :
- * - @ref MIDG_JOB_VERTEX
- * - @ref MIDG_JOB_GEOMETRY
- *
- * @note An alternative to using this is to specify the BASE_JD_REQ_ONLY_COMPUTE
- * requirement in atoms.
- */
- BASE_CONTEXT_HINT_ONLY_COMPUTE = (1u << 2)
+ BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED = (1u << 1)
};
/**
*/
#define BASE_CONTEXT_CREATE_ALLOWED_FLAGS \
(((u32)BASE_CONTEXT_CCTX_EMBEDDED) | \
- ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) | \
- ((u32)BASE_CONTEXT_HINT_ONLY_COMPUTE))
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED))
/**
* Bitpattern describing the ::base_context_create_flags that can be passed to the kernel
*/
#define BASE_CONTEXT_CREATE_KERNEL_FLAGS \
- (((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED) | \
- ((u32)BASE_CONTEXT_HINT_ONLY_COMPUTE))
+ ((u32)BASE_CONTEXT_SYSTEM_MONITOR_SUBMIT_DISABLED)
/**
* Private flags used on the base context
* @addtogroup base_api Base APIs
* @{
*/
-/**
- * @addtogroup basecpuprops Base CPU Properties
- * @{
- */
-
-/**
- * @brief CPU Property Flag for base_cpu_props::cpu_flags, indicating a
- * Little Endian System. If not set in base_cpu_props::cpu_flags, then the
- * system is Big Endian.
- *
- * The compile-time equivalent is @ref OSU_CONFIG_CPU_LITTLE_ENDIAN.
- */
-#define BASE_CPU_PROPERTY_FLAG_LITTLE_ENDIAN F_BIT_0
-
-
-/**
- * @brief Platform dynamic CPU ID properties structure
- */
-typedef struct base_cpu_id_props
-{
- /**
- * CPU ID
- */
- u32 id;
-
- /**
- * CPU Part number
- */
- u16 part;
-
- /**
- * ASCII code of implementer trademark
- */
- u8 implementer;
-
- /**
- * CPU Variant
- */
- u8 variant;
-
- /**
- * CPU Architecture
- */
- u8 arch;
-
- /**
- * CPU revision
- */
- u8 rev;
-
- /**
- Validity of CPU id where 0-invalid and
- 1-valid only if ALL the cpu_id props are valid
- */
- u8 valid;
-
- u8 padding[1];
-}base_cpu_id_props;
-
-
-/** @brief Platform Dynamic CPU properties structure */
-typedef struct base_cpu_props {
- u32 nr_cores; /**< Number of CPU cores */
-
- /**
- * CPU page size as a Logarithm to Base 2. The compile-time
- * equivalent is @ref OSU_CONFIG_CPU_PAGE_SIZE_LOG2
- */
- u32 cpu_page_size_log2;
-
- /**
- * CPU L1 Data cache line size as a Logarithm to Base 2. The compile-time
- * equivalent is @ref OSU_CONFIG_CPU_L1_DCACHE_LINE_SIZE_LOG2.
- */
- u32 cpu_l1_dcache_line_size_log2;
-
- /**
- * CPU L1 Data cache size, in bytes. The compile-time equivalient is
- * @ref OSU_CONFIG_CPU_L1_DCACHE_SIZE.
- *
- * This CPU Property is mainly provided to implement OpenCL's
- * clGetDeviceInfo(), which allows the CL_DEVICE_GLOBAL_MEM_CACHE_SIZE
- * hint to be queried.
- */
- u32 cpu_l1_dcache_size;
-
- /**
- * CPU Property Flags bitpattern.
- *
- * This is a combination of bits as specified by the macros prefixed with
- * 'BASE_CPU_PROPERTY_FLAG_'.
- */
- u32 cpu_flags;
-
- /**
- * Maximum clock speed in MHz.
- * @usecase 'Maximum' CPU Clock Speed information is required by OpenCL's
- * clGetDeviceInfo() function for the CL_DEVICE_MAX_CLOCK_FREQUENCY hint.
- */
- u32 max_cpu_clock_speed_mhz;
-
- /**
- * @brief Total memory, in bytes.
- *
- * This is the theoretical maximum memory available to the CPU. It is
- * unlikely that a client will be able to allocate all of this memory for
- * their own purposes, but this at least provides an upper bound on the
- * memory available to the CPU.
- *
- * This is required for OpenCL's clGetDeviceInfo() call when
- * CL_DEVICE_GLOBAL_MEM_SIZE is requested, for OpenCL CPU devices.
- */
- u64 available_memory_size;
-
- /**
- * CPU ID detailed info
- */
- base_cpu_id_props cpu_id;
-
- u32 padding;
-} base_cpu_props;
-/** @} end group basecpuprops */
/**
* @brief The payload for a replay job. This must be in GPU memory.
* will be replayed in @b reverse order (so that extra ones can be added
* to the head in future soft jobs without affecting this soft job)
*/
- mali_addr64 tiler_jc_list;
+ u64 tiler_jc_list;
/**
* Pointer to the fragment job chain.
*/
- mali_addr64 fragment_jc;
+ u64 fragment_jc;
/**
* Pointer to the tiler heap free FBD field to be modified.
*/
- mali_addr64 tiler_heap_free;
+ u64 tiler_heap_free;
/**
* Hierarchy mask for the replayed fragment jobs. May be zero.
* Pointer to next entry in the list. A setting of NULL indicates the
* end of the list.
*/
- mali_addr64 next;
+ u64 next;
/**
* Pointer to the job chain.
*/
- mali_addr64 jc;
+ u64 jc;
} base_jd_replay_jc;