/*
 *
 * (C) COPYRIGHT 2014-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

/*
 * HW access job manager common APIs
 */

#ifndef _KBASE_HWACCESS_JM_H_
#define _KBASE_HWACCESS_JM_H_

/**
 * kbase_backend_run_atom() - Run an atom on the GPU
 * @kbdev: Device pointer
 * @katom: Atom to run
 *
 * Caller must hold the HW access lock.
 */
void kbase_backend_run_atom(struct kbase_device *kbdev,
			    struct kbase_jd_atom *katom);

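/*
 * Usage sketch (illustrative only, not part of this interface): running an
 * atom while holding the HW access lock. This sketch assumes the HW access
 * lock is the kbdev->hwaccess_lock spinlock taken with spin_lock_irqsave();
 * the helper name below is hypothetical.
 *
 *	static void run_atom_example(struct kbase_device *kbdev,
 *				     struct kbase_jd_atom *katom)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *		kbase_backend_run_atom(kbdev, katom);
 *		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 *	}
 */
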
/**
 * kbase_backend_slot_update - Update state based on slot ringbuffers
 * @kbdev: Device pointer
 *
 * Inspect the jobs in the slot ringbuffers and update state.
 *
 * This will cause jobs to be submitted to hardware if they are unblocked.
 */
void kbase_backend_slot_update(struct kbase_device *kbdev);

/**
 * kbase_backend_find_free_address_space() - Find a free address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * If no address spaces are currently free, then this function can evict an
 * idle context from the runpool, freeing up the address space it was using.
 *
 * The address space is marked as in use. The caller must either assign a
 * context to it using kbase_backend_use_ctx(), or release it using
 * kbase_backend_release_free_address_space().
 *
 * Return: Number of the free address space, or KBASEP_AS_NR_INVALID if none
 *         is available
 */
int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
					  struct kbase_context *kctx);

/**
 * kbase_backend_release_free_address_space() - Release an address space.
 * @kbdev: Device pointer
 * @as_nr: Address space to release
 *
 * The address space must have been returned by
 * kbase_backend_find_free_address_space().
 */
void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
					      int as_nr);

/**
 * kbase_backend_use_ctx() - Activate a currently unscheduled context, using the
 *                           provided address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer. May be NULL
 * @as_nr: Free address space to use
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * Return: true if successful, false if the address space could not be assigned
 */
bool kbase_backend_use_ctx(struct kbase_device *kbdev,
			   struct kbase_context *kctx,
			   int as_nr);

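/*
 * Usage sketch (illustrative only): the address-space acquisition protocol
 * described above. An address space returned by
 * kbase_backend_find_free_address_space() must either be handed to a context
 * via kbase_backend_use_ctx() or given back via
 * kbase_backend_release_free_address_space(). Releasing on a false return
 * from kbase_backend_use_ctx() is an assumption of this sketch, as is the
 * helper name.
 *
 *	static bool activate_ctx_example(struct kbase_device *kbdev,
 *					 struct kbase_context *kctx)
 *	{
 *		int as_nr = kbase_backend_find_free_address_space(kbdev, kctx);
 *
 *		if (as_nr == KBASEP_AS_NR_INVALID)
 *			return false;
 *
 *		if (!kbase_backend_use_ctx(kbdev, kctx, as_nr)) {
 *			kbase_backend_release_free_address_space(kbdev, as_nr);
 *			return false;
 *		}
 *
 *		return true;
 *	}
 */
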
/**
 * kbase_backend_use_ctx_sched() - Activate a context.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * kbase_gpu_next_job() will pull atoms from the active context.
 *
 * The context must already be scheduled and assigned to an address space. If
 * the context is not scheduled, then kbase_backend_use_ctx() should be used
 * instead.
 *
 * Caller must hold hwaccess_lock.
 *
 * Return: true if context is now active, false otherwise (i.e. if the context
 *         does not have an address space assigned)
 */
bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
				 struct kbase_context *kctx);

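/*
 * Usage sketch (illustrative only): activating an already-scheduled context
 * under hwaccess_lock, as required above. Treating a false return as "the
 * context lost its address space, fall back to full scheduling" is an
 * assumption of this sketch; the helper name is hypothetical.
 *
 *	static bool try_activate_scheduled_ctx(struct kbase_device *kbdev,
 *					       struct kbase_context *kctx)
 *	{
 *		unsigned long flags;
 *		bool active;
 *
 *		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *		active = kbase_backend_use_ctx_sched(kbdev, kctx);
 *		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 *
 *		return active;
 *	}
 */
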
/**
 * kbase_backend_release_ctx_irq - Release a context from the GPU. This will
 *                                 de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex and hwaccess_lock.
 */
void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
				   struct kbase_context *kctx);

/**
 * kbase_backend_release_ctx_noirq - Release a context from the GPU. This will
 *                                   de-assign the assigned address space.
 * @kbdev: Device pointer
 * @kctx: Context pointer
 *
 * Caller must hold kbase_device->mmu_hw_mutex.
 *
 * This function must perform any operations that could not be performed in IRQ
 * context by kbase_backend_release_ctx_irq().
 */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
				     struct kbase_context *kctx);

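/*
 * Usage sketch (illustrative only): releasing a context with the documented
 * lock requirements. The IRQ-safe part runs under both
 * kbase_device->mmu_hw_mutex and hwaccess_lock; the remaining work is then
 * done by the _noirq variant with only the mutex held. The exact ordering
 * and the helper name are assumptions of this sketch.
 *
 *	static void release_ctx_example(struct kbase_device *kbdev,
 *					struct kbase_context *kctx)
 *	{
 *		unsigned long flags;
 *
 *		mutex_lock(&kbdev->mmu_hw_mutex);
 *		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 *		kbase_backend_release_ctx_irq(kbdev, kctx);
 *		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 *		kbase_backend_release_ctx_noirq(kbdev, kctx);
 *		mutex_unlock(&kbdev->mmu_hw_mutex);
 *	}
 */
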
/**
 * kbase_backend_complete_wq() - Perform backend-specific actions required on
 *                               completing an atom.
 * @kbdev: Device pointer
 * @katom: Pointer to the atom to complete
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq(struct kbase_device *kbdev,
			       struct kbase_jd_atom *katom);

/**
 * kbase_backend_complete_wq_post_sched - Perform backend-specific actions
 *                                        required on completing an atom, after
 *                                        any scheduling has taken place.
 * @kbdev: Device pointer
 * @core_req: Core requirements of atom
 * @affinity: Affinity of atom
 * @coreref_state: Coreref state of atom
 *
 * This function should only be called from kbase_jd_done_worker() or
 * js_return_worker().
 */
void kbase_backend_complete_wq_post_sched(struct kbase_device *kbdev,
					  base_jd_core_req core_req,
					  u64 affinity,
					  enum kbase_atom_coreref_state coreref_state);

/**
 * kbase_backend_reset() - The GPU is being reset. Cancel all jobs on the GPU
 *                         and remove any others from the ringbuffers.
 * @kbdev: Device pointer
 * @end_timestamp: Timestamp of reset
 */
void kbase_backend_reset(struct kbase_device *kbdev, ktime_t *end_timestamp);

/**
 * kbase_backend_inspect_head() - Return the atom currently at the head of slot
 *                                @js
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Atom currently at the head of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_head(struct kbase_device *kbdev,
						 int js);

/**
 * kbase_backend_inspect_tail - Return the atom currently at the tail of slot
 *                              @js
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Atom currently at the tail of slot @js, or NULL
 */
struct kbase_jd_atom *kbase_backend_inspect_tail(struct kbase_device *kbdev,
						 int js);

/**
 * kbase_backend_nr_atoms_on_slot() - Return the number of atoms currently on a
 *                                    slot
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of atoms currently on slot @js
 */
int kbase_backend_nr_atoms_on_slot(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_nr_atoms_submitted() - Return the number of atoms on a slot
 *                                      that are currently on the GPU.
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of atoms on slot @js that are currently on the GPU.
 */
int kbase_backend_nr_atoms_submitted(struct kbase_device *kbdev, int js);

/**
 * kbase_backend_ctx_count_changed() - Number of contexts ready to submit jobs
 *                                     has changed
 * @kbdev: Device pointer
 *
 * Perform any required backend-specific actions (e.g. starting/stopping
 * scheduling timers).
 */
void kbase_backend_ctx_count_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_timeouts_changed() - Job Scheduler timeouts have changed.
 * @kbdev: Device pointer
 *
 * Perform any required backend-specific actions (e.g. updating timeouts of
 * currently running atoms).
 */
void kbase_backend_timeouts_changed(struct kbase_device *kbdev);

/**
 * kbase_backend_slot_free() - Return the number of jobs that can be currently
 *                             submitted to slot @js.
 * @kbdev: Device pointer
 * @js: Job slot to inspect
 *
 * Return: Number of jobs that can be submitted.
 */
int kbase_backend_slot_free(struct kbase_device *kbdev, int js);

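/*
 * Usage sketch (illustrative only): combining the three slot queries above to
 * report slot occupancy. The helper name is hypothetical, and the use of
 * kbdev->dev with dev_dbg() is an assumption of this sketch.
 *
 *	static void log_slot_state(struct kbase_device *kbdev, int js)
 *	{
 *		int on_gpu = kbase_backend_nr_atoms_submitted(kbdev, js);
 *		int queued = kbase_backend_nr_atoms_on_slot(kbdev, js);
 *		int free = kbase_backend_slot_free(kbdev, js);
 *
 *		dev_dbg(kbdev->dev, "slot %d: %d on GPU, %d queued, %d free\n",
 *			js, on_gpu, queued, free);
 *	}
 */
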
/**
 * kbase_job_check_leave_disjoint - potentially leave disjoint state
 * @kbdev: kbase device
 * @target_katom: atom which is finishing
 *
 * Work out whether to leave disjoint state when finishing an atom that was
 * originated by kbase_job_check_enter_disjoint().
 */
void kbase_job_check_leave_disjoint(struct kbase_device *kbdev,
				    struct kbase_jd_atom *target_katom);

/**
 * kbase_backend_jm_kill_jobs_from_kctx - Kill all jobs that are currently
 *                                        running from a context
 * @kctx: Context pointer
 *
 * This is used in response to a page fault to remove all jobs from the faulting
 * context from the hardware.
 */
void kbase_backend_jm_kill_jobs_from_kctx(struct kbase_context *kctx);

/**
 * kbase_jm_wait_for_zero_jobs - Wait for context to have zero jobs running, and
 *                               to be descheduled.
 * @kctx: Context pointer
 *
 * This should be called following kbase_js_zap_context(), to ensure the context
 * can be safely destroyed.
 */
void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx);

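/*
 * Usage sketch (illustrative only): the teardown ordering described above.
 * Once kbase_jm_wait_for_zero_jobs() returns, the context can be safely
 * destroyed. kbase_js_zap_context() is declared elsewhere in the driver; the
 * helper name is hypothetical.
 *
 *	static void teardown_ctx_example(struct kbase_context *kctx)
 *	{
 *		kbase_js_zap_context(kctx);
 *		kbase_jm_wait_for_zero_jobs(kctx);
 *	}
 */
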
/**
 * kbase_backend_get_current_flush_id - Return the current flush ID
 * @kbdev: Device pointer
 *
 * Return: the current flush ID to be recorded for each job chain
 */
u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev);

#if KBASE_GPU_RESET_EN

/**
 * kbase_prepare_to_reset_gpu - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu should
 *           not be called.
 */
bool kbase_prepare_to_reset_gpu(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu() if it
 * returns true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu().
 *
 * After this function is called (or not called if kbase_prepare_to_reset_gpu()
 * returned false), the caller should wait for kbdev->reset_waitq to be
 * signalled to know when the reset has completed.
 */
void kbase_reset_gpu(struct kbase_device *kbdev);

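/*
 * Usage sketch (illustrative only): the reset protocol described above. In
 * either branch the caller waits on kbdev->reset_waitq (field name as given
 * in the comment above). Using kbase_reset_gpu_active() as the wait_event()
 * condition is an assumption of this sketch, as is the helper name.
 *
 *	static void trigger_reset_and_wait(struct kbase_device *kbdev)
 *	{
 *		if (kbase_prepare_to_reset_gpu(kbdev))
 *			kbase_reset_gpu(kbdev);
 *
 *		wait_event(kbdev->reset_waitq,
 *			   !kbase_reset_gpu_active(kbdev));
 *	}
 */
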
/**
 * kbase_prepare_to_reset_gpu_locked - Prepare for resetting the GPU.
 * @kbdev: Device pointer
 *
 * This function just soft-stops all the slots to ensure that as many jobs as
 * possible are saved.
 *
 * Return: a boolean which should be interpreted as follows:
 * - true  - Prepared for reset, kbase_reset_gpu_locked should be called.
 * - false - Another thread is performing a reset, kbase_reset_gpu_locked
 *           should not be called.
 */
bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_locked - Reset the GPU
 * @kbdev: Device pointer
 *
 * This function should be called after kbase_prepare_to_reset_gpu_locked() if
 * it returns true. It should never be called without a corresponding call to
 * kbase_prepare_to_reset_gpu_locked().
 *
 * After this function is called (or not called if
 * kbase_prepare_to_reset_gpu_locked() returned false), the caller should wait
 * for kbdev->reset_waitq to be signalled to know when the reset has completed.
 */
void kbase_reset_gpu_locked(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_silent - Reset the GPU silently
 * @kbdev: Device pointer
 *
 * Reset the GPU without trying to cancel jobs and without emitting messages
 * into the kernel log while doing the reset.
 *
 * This function should be used in cases where we are doing a controlled reset
 * of the GPU as part of normal processing (e.g. exiting protected mode) where
 * the driver will have ensured the scheduler has been idled and all other
 * users of the GPU (e.g. instrumentation) have been suspended.
 */
void kbase_reset_gpu_silent(struct kbase_device *kbdev);

/**
 * kbase_reset_gpu_active - Reports if the GPU is being reset
 * @kbdev: Device pointer
 *
 * Return: True if the GPU is in the process of being reset.
 */
bool kbase_reset_gpu_active(struct kbase_device *kbdev);
#endif /* KBASE_GPU_RESET_EN */

/**
 * kbase_job_slot_hardstop - Hard-stop the specified job slot
 * @kctx: The kbase context that contains the job(s) that should
 *        be hard-stopped
 * @js: The job slot to hard-stop
 * @target_katom: The job that should be hard-stopped (or NULL for all
 *                jobs from the context)
 *
 * The job slot lock must be held when calling this function.
 */
void kbase_job_slot_hardstop(struct kbase_context *kctx, int js,
			     struct kbase_jd_atom *target_katom);

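/*
 * Usage sketch (illustrative only): hard-stopping all jobs from a context on
 * slot @js. This sketch assumes the "job slot lock" mentioned above is the
 * hwaccess_lock spinlock on the context's device, reached via kctx->kbdev;
 * treat both the lock choice and the helper name as assumptions.
 *
 *	static void hardstop_ctx_example(struct kbase_context *kctx, int js)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
 *		kbase_job_slot_hardstop(kctx, js, NULL);
 *		spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
 *	}
 */
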
#endif /* _KBASE_HWACCESS_JM_H_ */