3 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
20 * HW access job manager common APIs
23 #ifndef _KBASE_HWACCESS_JM_H_
24 #define _KBASE_HWACCESS_JM_H_
27 * kbase_backend_run_atom() - Run an atom on the GPU
28 * @kbdev: Device pointer
31 * Caller must hold the HW access lock
33 void kbase_backend_run_atom(struct kbase_device *kbdev,
34 struct kbase_jd_atom *katom);
37 * kbase_backend_find_free_address_space() - Find a free address space.
38 * @kbdev: Device pointer
39 * @kctx: Context pointer
41 * If no address spaces are currently free, then this function can evict an
42 * idle context from the runpool, freeing up the address space it was using.
44 * The address space is marked as in use. The caller must either assign a
45 * context using kbase_backend_use_ctx(), or release it using
46 * kbase_backend_release_free_address_space()
48 * Return: Number of a free address space, or KBASEP_AS_NR_INVALID if none
51 int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
52 struct kbase_context *kctx);
55 * kbase_backend_release_free_address_space() - Release an address space.
56 * @kbdev: Device pointer
57 * @as_nr: Address space to release
59 * The address space must have been returned by
60 * kbase_backend_find_free_address_space().
62 void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
66 * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
67 * provided address space.
68 * @kbdev: Device pointer
69 * @kctx: Context pointer. May be NULL
70 * @as_nr: Free address space to use
72 * kbase_gpu_next_job() will pull atoms from the active context.
74 * Return: true if successful, false if ASID not assigned. If kctx->as_pending
75 * is true then ASID assignment will complete at some point in the
76 * future and will re-start scheduling, otherwise no ASIDs are available
78 bool kbase_backend_use_ctx(struct kbase_device *kbdev,
79 struct kbase_context *kctx,
83 * kbase_backend_use_ctx_sched() - Activate a context.
84 * @kbdev: Device pointer
85 * @kctx: Context pointer
87 * kbase_gpu_next_job() will pull atoms from the active context.
89 * The context must already be scheduled and assigned to an address space. If
90 * the context is not scheduled, then kbase_backend_use_ctx() should be used
93 * Caller must hold runpool_irq.lock
95 * Return: true if context is now active, false otherwise (ie if context does
96 * not have an address space assigned)
98 bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
99 struct kbase_context *kctx);
102 * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
103 * de-assign the assigned address space.
104 * @kbdev: Device pointer
105 * @kctx: Context pointer
107 * Caller must hold as->transaction_mutex and runpool_irq.lock
109 void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
110 struct kbase_context *kctx);
113 * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
114 * de-assign the assigned address space.
115 * @kbdev: Device pointer
116 * @kctx: Context pointer
118 * Caller must hold as->transaction_mutex
120 * This function must perform any operations that could not be performed in IRQ
121 * context by kbase_backend_release_ctx_irq().
123 void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
124 struct kbase_context *kctx);
127 * kbase_backend_complete_wq() - Perform backend-specific actions required on
128 * completing an atom.
129 * @kbdev: Device pointer
130 * @katom: Pointer to the atom to complete
132 * This function should only be called from kbase_jd_done_worker() or
133 * js_return_worker().
135 * NOTE(review): the prototype below returns void, but this line previously
135 * claimed a bool (completed/re-submit) return — stale doc; confirm intent.
137 void kbase_backend_complete_wq(struct kbase_device *kbdev,
138 struct kbase_jd_atom *katom);
141 * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
142 * required on completing an atom, after
143 * any scheduling has taken place.
144 * @kbdev: Device pointer
145 * @core_req: Core requirements of atom
146 * @affinity: Affinity of atom
147 * @coreref_state: Coreref state of atom
149 * This function should only be called from kbase_jd_done_worker() or
150 * js_return_worker().
152 void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
153 base_jd_core_req core_req, u64 affinity,
154 enum kbase_atom_coreref_state coreref_state);
157 * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
158 * and remove any others from the ringbuffers.
159 * @kbdev: Device pointer
160 * @end_timestamp: Timestamp of reset
162 void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);
165 * kbase_backend_inspect_head() - Return the atom currently at the head of slot
167 * @kbdev: Device pointer
168 * @js: Job slot to inspect
170 * Return: Atom currently at the head of slot @js, or NULL
172 struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
176 * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
178 * @kbdev: Device pointer
179 * @js: Job slot to inspect
181 * Return: Atom currently at the tail of slot @js, or NULL
183 struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
187 * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
189 * @kbdev: Device pointer
190 * @js: Job slot to inspect
192 * Return: Number of atoms currently on slot
194 int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);
197 * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
198 * that are currently on the GPU.
199 * @kbdev: Device pointer
200 * @js: Job slot to inspect
202 * Return: Number of atoms currently on slot @js that are currently on the GPU.
204 int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);
207 * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
209 * @kbdev: Device pointer
211 * Perform any required backend-specific actions (eg starting/stopping
212 * scheduling timers).
214 void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);
217 * kbase_backend_slot_free() - Return the number of jobs that can be currently
218 * submitted to slot @js.
219 * @kbdev: Device pointer
220 * @js: Job slot to inspect
222 * Return: Number of jobs that can be submitted.
224 int kbase_backend_slot_free(struct kbase_device *kbdev, int js);
227 * kbase_job_check_leave_disjoint - potentially leave disjoint state
228 * @kbdev: kbase device
229 * @target_katom: atom which is finishing
231 * Work out whether to leave disjoint state when finishing an atom that was
232 * originated by kbase_job_check_enter_disjoint().
234 void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
235 struct kbase_jd_atom *target_katom);
238 * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
239 * running from a context
240 * @kctx: Context pointer
242 * This is used in response to a page fault to remove all jobs from the faulting
243 * context from the hardware.
245 void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);
248 * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
250 * @kctx: Context pointer
252 * This should be called following kbase_js_zap_context(), to ensure the context
253 * can be safely destroyed.
255 void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);
258 * kbase_backend_get_current_flush_id - Return the current flush ID
260 * @kbdev: Device pointer
262 * Return: the current flush ID to be recorded for each job chain
264 u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);
266 #if KBASE_GPU_RESET_EN
268 * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
269 * @kbdev: Device pointer
271 * This function just soft-stops all the slots to ensure that as many jobs as
272 * possible are saved.
274 * Return: a boolean which should be interpreted as follows:
275 * - true - Prepared for reset, kbase_reset_gpu should be called.
276 * - false - Another thread is performing a reset, kbase_reset_gpu should
279 bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);
282 * kbase_reset_gpu - Reset the GPU
283 * @kbdev: Device pointer
285 * This function should be called after kbase_prepare_to_reset_gpu if it returns
286 * true. It should never be called without a corresponding call to
287 * kbase_prepare_to_reset_gpu.
289 * After this function is called (or not called if kbase_prepare_to_reset_gpu
290 * returned false), the caller should wait for kbdev->reset_waitq to be
291 * signalled to know when the reset has completed.
293 void kbase_reset_gpu(struct kbase_device *kbdev);
296 * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
297 * @kbdev: Device pointer
299 * This function just soft-stops all the slots to ensure that as many jobs as
300 * possible are saved.
302 * Return: a boolean which should be interpreted as follows:
303 * - true - Prepared for reset, kbase_reset_gpu should be called.
304 * - false - Another thread is performing a reset, kbase_reset_gpu should
307 bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);
310 * kbase_reset_gpu_locked - Reset the GPU
311 * @kbdev: Device pointer
313 * This function should be called after kbase_prepare_to_reset_gpu if it
314 * returns true. It should never be called without a corresponding call to
315 * kbase_prepare_to_reset_gpu.
317 * After this function is called (or not called if kbase_prepare_to_reset_gpu
318 * returned false), the caller should wait for kbdev->reset_waitq to be
319 * signalled to know when the reset has completed.
321 void kbase_reset_gpu_locked(struct kbase_device *kbdev);
325 * kbase_job_slot_hardstop - Hard-stop the specified job slot
326 * @kctx: The kbase context that contains the job(s) that should
328 * @js: The job slot to hard-stop
329 * @target_katom: The job that should be hard-stopped (or NULL for all
330 * jobs from the context)
332 * The job slot lock must be held when calling this function.
334 void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
335 struct kbase_jd_atom *target_katom);
336 #endif /* KBASE_GPU_RESET_EN */
337 #endif /* _KBASE_HWACCESS_JM_H_ */