3 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
20 * Register backend context / address space management
23 #include <mali_kbase.h>
24 #include <mali_kbase_hwaccess_jm.h>
27 * assign_and_activate_kctx_addr_space - Assign an AS to a context
28 * @kbdev: Kbase device
29 * @kctx: Kbase context
30 * @current_as: Address Space to assign
32 * Assign an Address Space (AS) to a context, and add the context to the Policy.
35 * setting up the global runpool_irq structure and the context on the AS,
36 * Activating the MMU on the AS,
37 * Allowing jobs to be submitted on the AS.
40 * kbasep_js_kctx_info.jsctx_mutex held,
41 * kbasep_js_device_data.runpool_mutex held,
42 * AS transaction mutex held,
43 * Runpool IRQ lock held
45 static void assign_and_activate_kctx_addr_space(struct kbase_device *kbdev,
46 struct kbase_context *kctx,
47 struct kbase_as *current_as)
49 struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
50 struct kbasep_js_per_as_data *js_per_as_data;
51 int as_nr = current_as->number;
53 lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
54 lockdep_assert_held(&js_devdata->runpool_mutex);
55 lockdep_assert_held(¤t_as->transaction_mutex);
56 lockdep_assert_held(&js_devdata->runpool_irq.lock);
58 js_per_as_data = &js_devdata->runpool_irq.per_as_data[as_nr];
60 /* Attribute handling */
61 kbasep_js_ctx_attr_runpool_retain_ctx(kbdev, kctx);
63 /* Assign addr space */
66 /* If the GPU is currently powered, activate this address space on the
68 if (kbdev->pm.backend.gpu_powered)
69 kbase_mmu_update(kctx);
70 /* If the GPU was not powered then the MMU will be reprogrammed on the
71 * next pm_context_active() */
73 /* Allow it to run jobs */
74 kbasep_js_set_submit_allowed(js_devdata, kctx);
77 js_per_as_data->kctx = kctx;
78 js_per_as_data->as_busy_refcount = 0;
80 kbase_js_runpool_inc_context_count(kbdev, kctx);
84 * release_addr_space - Release an address space
85 * @kbdev: Kbase device
86 * @kctx_as_nr: Address space of context to release
87 * @kctx: Context being released
89 * Context: kbasep_js_device_data.runpool_mutex must be held
91 * Release an address space, making it available for being picked again.
93 static void release_addr_space(struct kbase_device *kbdev, int kctx_as_nr,
94 struct kbase_context *kctx)
96 struct kbasep_js_device_data *js_devdata;
97 u16 as_bit = (1u << kctx_as_nr);
99 js_devdata = &kbdev->js_data;
100 lockdep_assert_held(&js_devdata->runpool_mutex);
102 /* The address space must not already be free */
103 KBASE_DEBUG_ASSERT(!(js_devdata->as_free & as_bit));
105 js_devdata->as_free |= as_bit;
107 kbase_js_runpool_dec_context_count(kbdev, kctx);
110 bool kbase_backend_use_ctx_sched(struct kbase_device *kbdev,
111 struct kbase_context *kctx)
115 if (kbdev->hwaccess.active_kctx == kctx) {
116 /* Context is already active */
120 for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
121 struct kbasep_js_per_as_data *js_per_as_data =
122 &kbdev->js_data.runpool_irq.per_as_data[i];
124 if (js_per_as_data->kctx == kctx) {
125 /* Context already has ASID - mark as active */
130 /* Context does not have address space assigned */
134 void kbase_backend_release_ctx_irq(struct kbase_device *kbdev,
135 struct kbase_context *kctx)
137 struct kbasep_js_per_as_data *js_per_as_data;
138 int as_nr = kctx->as_nr;
140 if (as_nr == KBASEP_AS_NR_INVALID) {
141 WARN(1, "Attempting to release context without ASID\n");
145 lockdep_assert_held(&kbdev->as[as_nr].transaction_mutex);
146 lockdep_assert_held(&kbdev->js_data.runpool_irq.lock);
148 js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[kctx->as_nr];
149 if (js_per_as_data->as_busy_refcount != 0) {
150 WARN(1, "Attempting to release active ASID\n");
154 /* Release context from address space */
155 js_per_as_data->kctx = NULL;
157 kbasep_js_clear_submit_allowed(&kbdev->js_data, kctx);
158 /* If the GPU is currently powered, de-activate this address space on
160 if (kbdev->pm.backend.gpu_powered)
161 kbase_mmu_disable(kctx);
162 /* If the GPU was not powered then the MMU will be reprogrammed on the
163 * next pm_context_active() */
165 release_addr_space(kbdev, as_nr, kctx);
166 kctx->as_nr = KBASEP_AS_NR_INVALID;
/*
 * kbase_backend_release_ctx_noirq - backend release hook that runs without
 * the runpool IRQ lock held.
 *
 * NOTE(review): the body of this function is not visible in this chunk;
 * it appears to be an empty stub for this backend — confirm against the
 * complete file before relying on that.
 */
void kbase_backend_release_ctx_noirq(struct kbase_device *kbdev,
struct kbase_context *kctx)
174 void kbase_backend_release_free_address_space(struct kbase_device *kbdev,
177 struct kbasep_js_device_data *js_devdata;
179 js_devdata = &kbdev->js_data;
181 lockdep_assert_held(&js_devdata->runpool_mutex);
183 js_devdata->as_free |= (1 << as_nr);
187 * check_is_runpool_full - check whether the runpool is full for a specified
189 * @kbdev: Kbase device
190 * @kctx: Kbase context
192 * If kctx == NULL, then this makes the least restrictive check on the
193 * runpool. A specific context that is supplied immediately after could fail
194 * the check, even under the same conditions.
196 * Therefore, once a context is obtained you \b must re-check it with this
197 * function, since the return value could change to false.
200 * In all cases, the caller must hold kbasep_js_device_data.runpool_mutex.
201 * When kctx != NULL the caller must hold the
202 * kbasep_js_kctx_info.ctx.jsctx_mutex.
203 * When kctx == NULL, then the caller need not hold any jsctx_mutex locks (but
204 * it doesn't do any harm to do so).
206 * Return: true if the runpool is full
208 static bool check_is_runpool_full(struct kbase_device *kbdev,
209 struct kbase_context *kctx)
211 struct kbasep_js_device_data *js_devdata;
212 bool is_runpool_full;
214 js_devdata = &kbdev->js_data;
215 lockdep_assert_held(&js_devdata->runpool_mutex);
217 /* Regardless of whether a context is submitting or not, can't have more
218 * than there are HW address spaces */
219 is_runpool_full = (bool) (js_devdata->nr_all_contexts_running >=
220 kbdev->nr_hw_address_spaces);
222 if (kctx != NULL && (kctx->jctx.sched_info.ctx.flags &
223 KBASE_CTX_FLAG_SUBMIT_DISABLED) == 0) {
224 lockdep_assert_held(&kctx->jctx.sched_info.ctx.jsctx_mutex);
225 /* Contexts that submit might use less of the address spaces
226 * available, due to HW workarounds. In which case, the runpool
227 * is also full when the number of submitting contexts exceeds
228 * the number of submittable address spaces.
230 * Both checks must be made: can have nr_user_address_spaces ==
231 * nr_hw_address spaces, and at the same time can have
232 * nr_user_contexts_running < nr_all_contexts_running. */
233 is_runpool_full |= (bool)
234 (js_devdata->nr_user_contexts_running >=
235 kbdev->nr_user_address_spaces);
238 return is_runpool_full;
/*
 * kbase_backend_find_free_address_space - find (or free up) an AS for @kctx.
 *
 * Takes jsctx_mutex then runpool_mutex, first consulting the as_free bitmask
 * for an unused address space; failing that, it scans every HW address space
 * under the runpool IRQ lock looking for an idle, non-privileged context that
 * can be evicted so its AS can be reused. Returns KBASEP_AS_NR_INVALID when
 * nothing can be freed.
 *
 * NOTE(review): several physical lines of this function appear to have been
 * lost in extraction (statement continuations, closing braces, the else
 * branch after check_is_runpool_full(), and the success-path returns). The
 * comments below describe only the visible flow — reconcile against the
 * complete file before building.
 */
int kbase_backend_find_free_address_space(struct kbase_device *kbdev,
struct kbase_context *kctx)
struct kbasep_js_device_data *js_devdata;
struct kbasep_js_kctx_info *js_kctx_info;
js_devdata = &kbdev->js_data;
js_kctx_info = &kctx->jctx.sched_info;
/* Lock order: jsctx_mutex -> runpool_mutex (-> runpool_irq.lock below) */
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
mutex_lock(&js_devdata->runpool_mutex);
/* First try to find a free address space */
if (check_is_runpool_full(kbdev, kctx))
/* NOTE(review): branch body / else before the ffs() call not visible */
i = ffs(js_devdata->as_free) - 1;
/* ffs() is 1-based, so i == -1 means no bit set in as_free */
if (i >= 0 && i < kbdev->nr_hw_address_spaces) {
/* Claim the AS by clearing its bit in the free mask */
js_devdata->as_free &= ~(1 << i);
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
/* NOTE(review): fast-path "return i;" appears to be missing here */
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
/* No address space currently free, see if we can release one */
for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
struct kbasep_js_per_as_data *js_per_as_data;
struct kbasep_js_kctx_info *as_js_kctx_info;
struct kbase_context *as_kctx;
js_per_as_data = &kbdev->js_data.runpool_irq.per_as_data[i];
as_kctx = js_per_as_data->kctx;
/* NOTE(review): as_js_kctx_info is computed from as_kctx BEFORE the
 * NULL check below (member address-of on a possibly-NULL pointer) —
 * flagged for review */
as_js_kctx_info = &as_kctx->jctx.sched_info;
/* Don't release privileged or active contexts, or contexts with
if (as_kctx && !(as_kctx->jctx.sched_info.ctx.flags &
KBASE_CTX_FLAG_PRIVILEGED) &&
js_per_as_data->as_busy_refcount == 0) {
/* Retain the victim so it cannot vanish while locks are dropped */
if (!kbasep_js_runpool_retain_ctx_nolock(kbdev,
WARN(1, "Failed to retain active context\n");
/* Give up: unwind all three locks and report failure */
spin_unlock_irqrestore(
&js_devdata->runpool_irq.lock,
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
return KBASEP_AS_NR_INVALID;
/* Stop the victim from submitting any more jobs */
kbasep_js_clear_submit_allowed(js_devdata, as_kctx);
/* Drop and retake locks to take the jsctx_mutex on the
* context we're about to release without violating lock
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock,
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
/* Release context from address space */
mutex_lock(&as_js_kctx_info->ctx.jsctx_mutex);
mutex_lock(&js_devdata->runpool_mutex);
kbasep_js_runpool_release_ctx_nolock(kbdev, as_kctx);
if (!as_js_kctx_info->ctx.is_scheduled) {
/* Victim fully descheduled: requeue/kill it and take over its AS */
kbasep_js_runpool_requeue_or_kill_ctx(kbdev,
js_devdata->as_free &= ~(1 << i);
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
/* NOTE(review): success-path "return i;" appears to be missing here */
/* Context was retained while locks were dropped,
* continue looking for free AS */
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&as_js_kctx_info->ctx.jsctx_mutex);
/* Re-take our own locks in the original order before continuing scan */
mutex_lock(&js_kctx_info->ctx.jsctx_mutex);
mutex_lock(&js_devdata->runpool_mutex);
spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
/* Scan found nothing evictable: unwind and report no AS available */
spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
mutex_unlock(&js_devdata->runpool_mutex);
mutex_unlock(&js_kctx_info->ctx.jsctx_mutex);
return KBASEP_AS_NR_INVALID;
351 bool kbase_backend_use_ctx(struct kbase_device *kbdev,
352 struct kbase_context *kctx,
355 struct kbasep_js_device_data *js_devdata;
356 struct kbasep_js_kctx_info *js_kctx_info;
357 struct kbase_as *new_address_space = NULL;
359 js_devdata = &kbdev->js_data;
360 js_kctx_info = &kctx->jctx.sched_info;
362 if (kbdev->hwaccess.active_kctx == kctx ||
363 kctx->as_nr != KBASEP_AS_NR_INVALID ||
364 as_nr == KBASEP_AS_NR_INVALID) {
365 WARN(1, "Invalid parameters to use_ctx()\n");
369 new_address_space = &kbdev->as[as_nr];
371 lockdep_assert_held(&js_devdata->runpool_mutex);
372 lockdep_assert_held(&new_address_space->transaction_mutex);
373 lockdep_assert_held(&js_devdata->runpool_irq.lock);
375 assign_and_activate_kctx_addr_space(kbdev, kctx, new_address_space);
377 if ((js_kctx_info->ctx.flags & KBASE_CTX_FLAG_PRIVILEGED) != 0) {
378 /* We need to retain it to keep the corresponding address space
380 kbasep_js_runpool_retain_ctx_nolock(kbdev, kctx);