3 * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
20 #include <mali_kbase.h>
22 #if defined(CONFIG_DMA_SHARED_BUFFER)
23 #include <linux/dma-buf.h>
24 #include <asm/cacheflush.h>
25 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
26 #include <linux/dma-mapping.h>
29 #include <linux/syscalls.h>
30 #include "mali_kbase_sync.h"
32 #include <mali_base_kernel.h>
33 #include <mali_kbase_hwaccess_time.h>
34 #include <mali_kbase_mem_linux.h>
35 #include <linux/version.h>
36 #include <linux/ktime.h>
37 #include <linux/pfn.h>
38 #include <linux/sched.h>
40 /* Mask to check cache alignment of data structures */
41 #define KBASE_CACHE_ALIGNMENT_MASK ((1<<L1_CACHE_SHIFT)-1)
44 * @file mali_kbase_softjobs.c
46 * This file implements the logic behind software only jobs that are
47 * executed within the driver rather than being handed over to the GPU.
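/*
 * Append a soft job atom to the context's list of waiting soft jobs.
 * The list is protected by kctx->waiting_soft_jobs_lock;
 * kbasep_remove_waiting_soft_job() below is the counterpart.
 */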
50 void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
52 struct kbase_context *kctx = katom->kctx;
55 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
56 list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
57 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
60 void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
62 struct kbase_context *kctx = katom->kctx;
65 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
66 list_del(&katom->queue);
67 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
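/*
 * Add the atom to the waiting list and make sure the context's
 * soft_job_timeout timer is running, so that the atom can be cancelled if
 * it is still waiting after soft_job_timeout_ms milliseconds.
 */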
70 static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
72 struct kbase_context *kctx = katom->kctx;
74 /* Record the start time of this atom so we can cancel it when it times out. */
77 katom->start_timestamp = ktime_get();
79 /* Add the atom to the waiting list before the timer is
80 * (re)started to make sure that it gets processed.
82 kbasep_add_waiting_soft_job(katom);
84 /* Schedule timeout of this atom after a period if it is not active */
85 if (!timer_pending(&kctx->soft_job_timeout)) {
86 int timeout_ms = atomic_read(
87 &kctx->kbdev->js_data.soft_job_timeout_ms);
88 mod_timer(&kctx->soft_job_timeout,
89 jiffies + msecs_to_jiffies(timeout_ms));
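/*
 * Read the one-byte status of a soft event. @evt is the GPU virtual
 * address of the status byte, which is mapped temporarily with
 * kbase_vmap() for the access.
 */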
93 static int kbasep_read_soft_event_status(
94 struct kbase_context *kctx, u64 evt, unsigned char *status)
96 unsigned char *mapped_evt;
97 struct kbase_vmap_struct map;
99 mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
103 *status = *mapped_evt;
105 kbase_vunmap(kctx, &map);
110 static int kbasep_write_soft_event_status(
111 struct kbase_context *kctx, u64 evt, unsigned char new_status)
113 unsigned char *mapped_evt;
114 struct kbase_vmap_struct map;
116 if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
117 (new_status != BASE_JD_SOFT_EVENT_RESET))
120 mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
124 *mapped_evt = new_status;
126 kbase_vunmap(kctx, &map);
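/*
 * Process a BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME atom: sample the GPU cycle
 * counter and system time together with the CPU clock and copy the
 * resulting base_dump_cpu_gpu_counters structure back to the user buffer
 * at katom->jc. If the device is suspending, the atom is queued on
 * suspended_soft_jobs_list instead, to be retried on resume.
 */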
131 static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
133 struct kbase_vmap_struct map;
136 struct base_dump_cpu_gpu_counters data;
140 struct kbase_context *kctx = katom->kctx;
143 memset(&data, 0, sizeof(data));
145 /* Take the PM active reference as late as possible - otherwise, it could
146 * delay suspend until we process the atom (which may be at the end of a
147 * long chain of dependencies). */
148 pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
150 struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
152 /* We're suspended - queue this on the list of suspended jobs
153 * Use dep_item[1], because dep_item[0] was previously in use
154 * for 'waiting_soft_jobs'.
156 mutex_lock(&js_devdata->runpool_mutex);
157 list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
158 mutex_unlock(&js_devdata->runpool_mutex);
160 /* Also add this atom to the list of waiting soft jobs */
161 kbasep_add_waiting_soft_job(katom);
163 return pm_active_err;
166 kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
169 kbase_pm_context_idle(kctx->kbdev);
171 data.sec = ts.tv_sec;
172 data.usec = ts.tv_nsec / 1000;
173 data.system_time = system_time;
174 data.cycle_counter = cycle_counter;
176 /* Assume this atom will be cancelled until we know otherwise */
177 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
179 /* GPU_WR access is checked on the range for returning the result to
180 * userspace for the following reasons:
181 * - security: this is currently how imported user buffers are checked.
182 * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR */
183 user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
187 memcpy(user_result, &data, sizeof(data));
189 kbase_vunmap(kctx, &map);
191 /* Atom was fine - mark it as done */
192 katom->event_code = BASE_JD_EVENT_DONE;
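/*
 * Signal the sync fence attached to a fence-trigger atom. The fence is
 * only signalled if it contains exactly one sync_pt and that sync_pt
 * belongs to one of our timelines; otherwise the atom is cancelled.
 */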
199 static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
202 struct sync_timeline *timeline;
204 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
205 if (!list_is_singular(&katom->fence->pt_list_head)) {
207 if (katom->fence->num_fences != 1) {
209 /* Not exactly one item in the list - so it didn't (directly) come from us */
210 return BASE_JD_EVENT_JOB_CANCELLED;
213 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
214 pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
216 pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
218 timeline = sync_pt_parent(pt);
220 if (!kbase_sync_timeline_is_ours(timeline)) {
221 /* Fence has a sync_pt which isn't ours! */
222 return BASE_JD_EVENT_JOB_CANCELLED;
225 kbase_sync_signal_pt(pt, result);
227 sync_timeline_signal(timeline);
229 return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
232 static void kbase_fence_wait_worker(struct work_struct *data)
234 struct kbase_jd_atom *katom;
235 struct kbase_context *kctx;
237 katom = container_of(data, struct kbase_jd_atom, work);
240 mutex_lock(&kctx->jctx.lock);
241 kbasep_remove_waiting_soft_job(katom);
242 kbase_finish_soft_job(katom);
243 if (jd_done_nolock(katom, NULL))
244 kbase_js_sched_all(kctx->kbdev);
245 mutex_unlock(&kctx->jctx.lock);
248 static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
250 struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
251 struct kbase_context *kctx;
253 KBASE_DEBUG_ASSERT(NULL != katom);
257 KBASE_DEBUG_ASSERT(NULL != kctx);
259 /* Propagate the fence status to the atom.
260 * If negative then cancel this atom and its dependencies.
262 if (kbase_fence_get_status(fence) < 0)
263 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
265 /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue.
267 * The issue is that we may signal the timeline while holding kctx->jctx.lock, and
268 * the callbacks are run synchronously from sync_timeline_signal, so we simply defer the work.
271 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
272 INIT_WORK(&katom->work, kbase_fence_wait_worker);
273 queue_work(kctx->jctx.job_done_wq, &katom->work);
276 static int kbase_fence_wait(struct kbase_jd_atom *katom)
280 KBASE_DEBUG_ASSERT(NULL != katom);
281 KBASE_DEBUG_ASSERT(NULL != katom->kctx);
283 sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
285 ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
288 /* Already signalled */
293 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
294 /* We should cause the dependent jobs in the bag to fail;
295 * to do this we schedule the work queue to complete this job */
296 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
297 INIT_WORK(&katom->work, kbase_fence_wait_worker);
298 queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
301 #ifdef CONFIG_MALI_FENCE_DEBUG
302 /* The timeout code will add this job to the list of waiting soft jobs.
304 kbasep_add_waiting_with_timeout(katom);
306 kbasep_add_waiting_soft_job(katom);
312 static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
316 pr_err("katom is NULL, returning\n");
321 pr_info("katom->fence is NULL; it may have been released out of order, so continue with the remaining cleanup\n");
323 /* If we returned here, might that result in an infinite loop?
324  * Would we need to delete dep_item[0] from kctx->waiting_soft_jobs,
325  * or does jd_done_nolock() move dep_item[0] to the completed job list and then delete it? */
330 if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
331 /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
335 /* Wait was cancelled - zap the atoms */
337 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
339 kbasep_remove_waiting_soft_job(katom);
340 kbase_finish_soft_job(katom);
342 if (jd_done_nolock(katom, NULL))
343 kbase_js_sched_all(katom->kctx->kbdev);
345 #endif /* CONFIG_SYNC */
347 static void kbasep_soft_event_complete_job(struct work_struct *work)
349 struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
351 struct kbase_context *kctx = katom->kctx;
354 mutex_lock(&kctx->jctx.lock);
355 resched = jd_done_nolock(katom, NULL);
356 mutex_unlock(&kctx->jctx.lock);
359 kbase_js_sched_all(kctx->kbdev);
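/*
 * Called when a soft event is set: complete every BASE_JD_REQ_SOFT_EVENT_WAIT
 * atom waiting on @evt by queueing kbasep_soft_event_complete_job() on the
 * context's job_done_wq, and cancel the soft job timeout timer once no
 * waiting jobs remain (fence waits keep it running when fence debug is
 * enabled).
 */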
362 void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
364 int cancel_timer = 1;
365 struct list_head *entry, *tmp;
366 unsigned long lflags;
368 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
369 list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
370 struct kbase_jd_atom *katom = list_entry(
371 entry, struct kbase_jd_atom, queue);
373 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
374 case BASE_JD_REQ_SOFT_EVENT_WAIT:
375 if (katom->jc == evt) {
376 list_del(&katom->queue);
378 katom->event_code = BASE_JD_EVENT_DONE;
379 INIT_WORK(&katom->work,
380 kbasep_soft_event_complete_job);
381 queue_work(kctx->jctx.job_done_wq,
384 /* There are still other waiting jobs, so we cannot
385  * cancel the timer yet.
390 #ifdef CONFIG_MALI_FENCE_DEBUG
391 case BASE_JD_REQ_SOFT_FENCE_WAIT:
392 /* Keep the timer running if fence debug is enabled and
393 * there are waiting fence jobs.
402 del_timer(&kctx->soft_job_timeout);
403 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
406 #ifdef CONFIG_MALI_FENCE_DEBUG
407 static char *kbase_fence_debug_status_string(int status)
417 static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
419 struct kbase_context *kctx = katom->kctx;
420 struct device *dev = kctx->kbdev->dev;
423 for (i = 0; i < 2; i++) {
424 struct kbase_jd_atom *dep;
426 list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
427 if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
428 dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
431 if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
432 == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
433 struct sync_fence *fence = dep->fence;
434 int status = kbase_fence_get_status(fence);
436 /* Found blocked trigger fence. */
438 "\tVictim trigger atom %d fence [%p] %s: %s\n",
439 kbase_jd_atom_id(kctx, dep),
441 kbase_fence_debug_status_string(status));
444 kbase_fence_debug_check_atom(dep);
449 static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
451 struct kbase_context *kctx = katom->kctx;
452 struct device *dev = katom->kctx->kbdev->dev;
453 struct sync_fence *fence = katom->fence;
454 int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
455 int status = kbase_fence_get_status(fence);
456 unsigned long lflags;
458 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
460 dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
461 kctx->tgid, kctx->id,
462 kbase_jd_atom_id(kctx, katom),
464 dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
466 kbase_fence_debug_status_string(status));
468 /* Search for blocked trigger atoms */
469 kbase_fence_debug_check_atom(katom);
471 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
473 /* Dump out the full state of all the Android sync fences.
474 * The function sync_dump() isn't exported to modules, so force
475 * sync_fence_wait() to time out to trigger sync_dump().
477 sync_fence_wait(fence, 1);
480 struct kbase_fence_debug_work {
481 struct kbase_jd_atom *katom;
482 struct work_struct work;
485 static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
487 struct kbase_fence_debug_work *w = container_of(work,
488 struct kbase_fence_debug_work, work);
489 struct kbase_jd_atom *katom = w->katom;
490 struct kbase_context *kctx = katom->kctx;
492 mutex_lock(&kctx->jctx.lock);
493 kbase_fence_debug_wait_timeout(katom);
494 mutex_unlock(&kctx->jctx.lock);
499 static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
501 struct kbase_fence_debug_work *work;
502 struct kbase_context *kctx = katom->kctx;
504 /* Enqueue fence debug worker. Use job_done_wq to get
505 * debug print ordered with job completion.
507 work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
508 /* Ignore allocation failure. */
511 INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
512 queue_work(kctx->jctx.job_done_wq, &work->work);
515 #endif /* CONFIG_MALI_FENCE_DEBUG */
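/*
 * Timer callback for kctx->soft_job_timeout. Scan the waiting soft jobs:
 * event-wait atoms that have been pending longer than soft_job_timeout_ms
 * are cancelled, fence waits get a debug dump when CONFIG_MALI_FENCE_DEBUG
 * is enabled, and the timer is re-armed while jobs that have not yet timed
 * out remain on the list.
 */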
517 void kbasep_soft_job_timeout_worker(unsigned long data)
519 struct kbase_context *kctx = (struct kbase_context *)data;
520 u32 timeout_ms = (u32)atomic_read(
521 &kctx->kbdev->js_data.soft_job_timeout_ms);
522 struct timer_list *timer = &kctx->soft_job_timeout;
523 ktime_t cur_time = ktime_get();
524 bool restarting = false;
525 unsigned long lflags;
526 struct list_head *entry, *tmp;
528 spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
529 list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
530 struct kbase_jd_atom *katom = list_entry(entry,
531 struct kbase_jd_atom, queue);
532 s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
533 katom->start_timestamp));
535 if (elapsed_time < (s64)timeout_ms) {
540 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
541 case BASE_JD_REQ_SOFT_EVENT_WAIT:
542 /* Take it out of the list to ensure that it
543 * will be cancelled in all cases
545 list_del(&katom->queue);
547 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
548 INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
549 queue_work(kctx->jctx.job_done_wq, &katom->work);
551 #ifdef CONFIG_MALI_FENCE_DEBUG
552 case BASE_JD_REQ_SOFT_FENCE_WAIT:
553 kbase_fence_debug_timeout(katom);
560 mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
561 spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
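/*
 * Start a BASE_JD_REQ_SOFT_EVENT_WAIT atom: if the event at katom->jc is
 * already set the atom completes immediately, otherwise it is added to the
 * waiting list with a timeout and left pending.
 */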
564 static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
566 struct kbase_context *kctx = katom->kctx;
567 unsigned char status;
569 /* The status of this soft-job is stored in jc */
570 if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
571 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
575 if (status == BASE_JD_SOFT_EVENT_SET)
576 return 0; /* Event already set, nothing to do */
578 kbasep_add_waiting_with_timeout(katom);
583 static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
584 unsigned char new_status)
586 /* Complete jobs waiting on the same event */
587 struct kbase_context *kctx = katom->kctx;
589 if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
590 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
594 if (new_status == BASE_JD_SOFT_EVENT_SET)
595 kbasep_complete_triggered_soft_events(kctx, katom->jc);
599 * kbase_soft_event_update() - Update soft event state
600 * @kctx: Pointer to context
601 * @event: Event to update
602 * @new_status: New status value of event
604 * Update the event, and wake up any atoms waiting for the event.
606 * Return: 0 on success, a negative error code on failure.
608 int kbase_soft_event_update(struct kbase_context *kctx,
610 unsigned char new_status)
614 mutex_lock(&kctx->jctx.lock);
616 if (kbasep_write_soft_event_status(kctx, event, new_status)) {
621 if (new_status == BASE_JD_SOFT_EVENT_SET)
622 kbasep_complete_triggered_soft_events(kctx, event);
625 mutex_unlock(&kctx->jctx.lock);
630 static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
632 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
633 if (jd_done_nolock(katom, NULL))
634 kbase_js_sched_all(katom->kctx->kbdev);
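/*
 * Book-keeping for one destination buffer of a debug-copy soft job: the
 * pinned destination user pages plus a reference to the source GPU
 * allocation (and, for imported user buffers, its pinned pages).
 */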
637 struct kbase_debug_copy_buffer {
642 struct kbase_mem_phy_alloc *gpu_alloc;
644 struct page **extres_pages;
648 static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
650 struct page **pages = buffer->extres_pages;
651 int nr_pages = buffer->nr_extres_pages;
656 for (i = 0; i < nr_pages; i++) {
657 struct page *pg = pages[i];
666 static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
668 struct kbase_debug_copy_buffer *buffers =
669 (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
671 unsigned int nr = katom->nr_extres;
676 kbase_gpu_vm_lock(katom->kctx);
677 for (i = 0; i < nr; i++) {
679 struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
681 if (!buffers[i].pages)
683 for (p = 0; p < buffers[i].nr_pages; p++) {
684 struct page *pg = buffers[i].pages[p];
689 kfree(buffers[i].pages);
691 switch (gpu_alloc->type) {
692 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
694 free_user_buffer(&buffers[i]);
698 /* Nothing to be done. */
701 kbase_mem_phy_alloc_put(gpu_alloc);
704 kbase_gpu_vm_unlock(katom->kctx);
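/*
 * Prepare a BASE_JD_REQ_SOFT_DEBUG_COPY atom: copy the array of
 * base_jd_debug_copy_buffer descriptors from user space, pin the
 * destination pages with get_user_pages_fast(), take a reference on each
 * source allocation and replace katom->jc with the kernel-side buffer
 * array. On failure kbase_debug_copy_finish() releases whatever was set up.
 */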
710 static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
712 struct kbase_debug_copy_buffer *buffers;
713 struct base_jd_debug_copy_buffer *user_buffers = NULL;
715 unsigned int nr = katom->nr_extres;
717 void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
722 buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
728 katom->jc = (u64)(uintptr_t)buffers;
730 user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
737 ret = copy_from_user(user_buffers, user_structs,
738 sizeof(*user_buffers)*nr);
742 for (i = 0; i < nr; i++) {
743 u64 addr = user_buffers[i].address;
744 u64 page_addr = addr & PAGE_MASK;
745 u64 end_page_addr = addr + user_buffers[i].size - 1;
746 u64 last_page_addr = end_page_addr & PAGE_MASK;
747 int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
749 struct kbase_va_region *reg;
750 struct base_external_resource user_extres;
755 buffers[i].nr_pages = nr_pages;
756 buffers[i].offset = addr & ~PAGE_MASK;
757 if (buffers[i].offset >= PAGE_SIZE) {
761 buffers[i].size = user_buffers[i].size;
763 buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
765 if (!buffers[i].pages) {
770 pinned_pages = get_user_pages_fast(page_addr,
774 if (pinned_pages < 0) {
778 if (pinned_pages != nr_pages) {
783 user_extres = user_buffers[i].extres;
784 if (user_extres.ext_resource == 0ULL) {
789 kbase_gpu_vm_lock(katom->kctx);
790 reg = kbase_region_tracker_find_region_enclosing_address(
791 katom->kctx, user_extres.ext_resource &
792 ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
794 if (NULL == reg || NULL == reg->gpu_alloc ||
795 (reg->flags & KBASE_REG_FREE)) {
800 buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
801 buffers[i].nr_extres_pages = reg->nr_pages;
803 if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
804 dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not the same size as the external resource to copy.\n");
806 switch (reg->gpu_alloc->type) {
807 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
809 struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
810 unsigned long nr_pages =
811 alloc->imported.user_buf.nr_pages;
813 if (alloc->imported.user_buf.mm != current->mm) {
817 buffers[i].extres_pages = kcalloc(nr_pages,
818 sizeof(struct page *), GFP_KERNEL);
819 if (!buffers[i].extres_pages) {
824 ret = get_user_pages_fast(
825 alloc->imported.user_buf.address,
827 buffers[i].extres_pages);
833 case KBASE_MEM_TYPE_IMPORTED_UMP:
835 dev_warn(katom->kctx->kbdev->dev,
836 "UMP is not supported for debug_copy jobs\n");
841 /* Nothing to be done. */
844 kbase_gpu_vm_unlock(katom->kctx);
851 kbase_gpu_vm_unlock(katom->kctx);
857 /* Frees allocated memory for kbase_debug_copy_job struct, including
858 * members, and sets jc to 0 */
859 kbase_debug_copy_finish(katom);
863 static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
864 void *extres_page, struct page **pages, unsigned int nr_pages,
865 unsigned int *target_page_nr, size_t offset, size_t *to_copy)
867 void *target_page = kmap(pages[*target_page_nr]);
868 size_t chunk = PAGE_SIZE-offset;
871 *target_page_nr += 1;
872 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
876 chunk = min(chunk, *to_copy);
878 memcpy(target_page + offset, extres_page, chunk);
881 kunmap(pages[*target_page_nr]);
883 *target_page_nr += 1;
884 if (*target_page_nr >= nr_pages)
887 target_page = kmap(pages[*target_page_nr]);
889 *target_page_nr += 1;
890 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
894 KBASE_DEBUG_ASSERT(target_page);
896 chunk = min(offset, *to_copy);
897 memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
900 kunmap(pages[*target_page_nr]);
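/*
 * Copy the contents of one external resource (an imported user buffer or
 * dma-buf) into the pinned destination pages described by @buf_data,
 * copying at most min(resource size, destination buffer size) bytes.
 */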
903 static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
904 struct kbase_debug_copy_buffer *buf_data)
907 unsigned int target_page_nr = 0;
908 struct page **pages = buf_data->pages;
909 u64 offset = buf_data->offset;
910 size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
911 size_t to_copy = min(extres_size, buf_data->size);
912 struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
915 KBASE_DEBUG_ASSERT(pages != NULL);
917 kbase_gpu_vm_lock(kctx);
923 switch (gpu_alloc->type) {
924 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
926 for (i = 0; i < buf_data->nr_extres_pages; i++) {
927 struct page *pg = buf_data->extres_pages[i];
928 void *extres_page = kmap(pg);
931 kbase_mem_copy_from_extres_page(kctx,
938 if (target_page_nr >= buf_data->nr_pages)
944 #ifdef CONFIG_DMA_SHARED_BUFFER
945 case KBASE_MEM_TYPE_IMPORTED_UMM: {
946 struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
948 KBASE_DEBUG_ASSERT(dma_buf != NULL);
949 KBASE_DEBUG_ASSERT(dma_buf->size ==
950 buf_data->nr_extres_pages * PAGE_SIZE);
952 ret = dma_buf_begin_cpu_access(dma_buf,
953 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
954 0, buf_data->nr_extres_pages*PAGE_SIZE,
960 for (i = 0; i < buf_data->nr_extres_pages; i++) {
962 void *extres_page = dma_buf_kmap(dma_buf, i);
965 kbase_mem_copy_from_extres_page(kctx,
971 dma_buf_kunmap(dma_buf, i, extres_page);
972 if (target_page_nr >= buf_data->nr_pages)
975 dma_buf_end_cpu_access(dma_buf,
976 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
977 0, buf_data->nr_extres_pages*PAGE_SIZE,
987 kbase_gpu_vm_unlock(kctx);
992 static int kbase_debug_copy(struct kbase_jd_atom *katom)
994 struct kbase_debug_copy_buffer *buffers =
995 (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
998 for (i = 0; i < katom->nr_extres; i++) {
999 int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
1008 static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
1010 __user void *data = (__user void *)(uintptr_t) katom->jc;
1011 struct base_jit_alloc_info *info;
1014 /* Fail the job if there is no info structure */
1020 /* Copy the information for safe access and future storage */
1021 info = kzalloc(sizeof(*info), GFP_KERNEL);
1027 if (copy_from_user(info, data, sizeof(*info)) != 0) {
1032 /* If the ID is zero then fail the job */
1033 if (info->id == 0) {
1038 /* Sanity check that the PA fits within the VA */
1039 if (info->va_pages < info->commit_pages) {
1044 /* Ensure the GPU address is correctly aligned */
1045 if ((info->gpu_alloc_addr & 0x7) != 0) {
1050 /* Replace the user pointer with our kernel allocated info structure */
1051 katom->jc = (u64)(uintptr_t) info;
1055 * The provided info->gpu_alloc_addr isn't validated here as
1056 * userland can cache allocations which means that even
1057 * though the region is valid it doesn't represent the
1058 * same thing it used to.
1060 * Complete validation of va_pages, commit_pages and extent
1061 * isn't done here as it will be done during the call to
1073 static void kbase_jit_allocate_process(struct kbase_jd_atom *katom)
1075 struct kbase_context *kctx = katom->kctx;
1076 struct base_jit_alloc_info *info;
1077 struct kbase_va_region *reg;
1078 struct kbase_vmap_struct mapping;
1081 info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1083 /* The JIT ID is still in use so fail the allocation */
1084 if (kctx->jit_alloc[info->id]) {
1085 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1090 * Mark the allocation so we know it's in use even if the
1091 * allocation itself fails.
1093 kctx->jit_alloc[info->id] = (struct kbase_va_region *) -1;
1095 /* Create a JIT allocation */
1096 reg = kbase_jit_allocate(kctx, info);
1098 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1103 * Write the address of the JIT allocation to the user provided buffer.
1106 ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
1110 * Leave the allocation "live" as the JIT free job will be done later.
1113 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1117 *ptr = reg->start_pfn << PAGE_SHIFT;
1118 kbase_vunmap(kctx, &mapping);
1120 katom->event_code = BASE_JD_EVENT_DONE;
1123 * Bind it to the user provided ID. Do this last so we can check for
1124 * the JIT free racing this JIT alloc job.
1126 kctx->jit_alloc[info->id] = reg;
1129 static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
1131 struct base_jit_alloc_info *info;
1133 info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1134 /* Free the info structure */
1138 static void kbase_jit_free_process(struct kbase_jd_atom *katom)
1140 struct kbase_context *kctx = katom->kctx;
1141 u8 id = (u8) katom->jc;
1144 * If the ID is zero or it is not in use yet then fail the job.
1146 if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
1147 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1152 * If the ID is valid but the allocation request failed, still succeed
1153 * this soft job but don't try to free the allocation.
1155 if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
1156 kbase_jit_free(kctx, kctx->jit_alloc[id]);
1158 kctx->jit_alloc[id] = NULL;
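/*
 * Prepare an external-resource map/unmap atom: copy the variable-length
 * base_external_resource_list from user space, check that the count is
 * non-zero and no larger than BASE_EXT_RES_COUNT_MAX, and stash the kernel
 * copy in katom->jc.
 */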
1161 static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1163 __user struct base_external_resource_list *user_ext_res;
1164 struct base_external_resource_list *ext_res;
1169 user_ext_res = (__user struct base_external_resource_list *)
1170 (uintptr_t) katom->jc;
1172 /* Fail the job if there is no info structure */
1173 if (!user_ext_res) {
1178 if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
1183 /* Is the number of external resources in range? */
1184 if (!count || count > BASE_EXT_RES_COUNT_MAX) {
1189 /* Copy the information for safe access and future storage */
1190 copy_size = sizeof(*ext_res);
1191 copy_size += sizeof(struct base_external_resource) * (count - 1);
1192 ext_res = kzalloc(copy_size, GFP_KERNEL);
1198 if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
1204 * Overwrite the count with the first value in case it was changed between the two copies.
1207 ext_res->count = count;
1210 * Replace the user pointer with our kernel allocated
1211 * ext_res structure.
1213 katom->jc = (u64)(uintptr_t) ext_res;
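/*
 * Map (or unmap, depending on @map) every external resource listed in the
 * atom using the sticky resource API. A failed map rolls back the
 * resources acquired so far; a failed unmap is reported but the remaining
 * resources are still released.
 */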
1223 static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1225 struct base_external_resource_list *ext_res;
1227 bool failed = false;
1229 ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1233 kbase_gpu_vm_lock(katom->kctx);
1235 for (i = 0; i < ext_res->count; i++) {
1238 gpu_addr = ext_res->ext_res[i].ext_resource &
1239 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1241 if (!kbase_sticky_resource_acquire(katom->kctx,
1245 if (!kbase_sticky_resource_release(katom->kctx, NULL,
1251 * In the case of unmap we continue unmapping other resources in the
1252 * case of failure but will always report failure if _any_ unmap request fails.
1256 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1258 katom->event_code = BASE_JD_EVENT_DONE;
1260 kbase_gpu_vm_unlock(katom->kctx);
1268 gpu_addr = ext_res->ext_res[i].ext_resource &
1269 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1271 kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
1274 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1275 kbase_gpu_vm_unlock(katom->kctx);
1281 static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1283 struct base_external_resource_list *ext_res;
1285 ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1286 /* Free the info structure */
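/*
 * Execute a soft job atom according to its BASE_JD_REQ_SOFT_JOB_TYPE.
 * Returns 0 when the atom has completed; a non-zero return means the atom
 * could not complete yet (for example it is still waiting on a fence or a
 * soft event) or that processing failed.
 */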
1290 int kbase_process_soft_job(struct kbase_jd_atom *katom)
1292 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1293 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1294 return kbase_dump_cpu_gpu_time(katom);
1296 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1297 KBASE_DEBUG_ASSERT(katom->fence != NULL);
1298 katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1299 /* Release the reference as we don't need it any more */
1300 sync_fence_put(katom->fence);
1301 katom->fence = NULL;
1303 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1304 return kbase_fence_wait(katom);
1305 #endif /* CONFIG_SYNC */
1306 case BASE_JD_REQ_SOFT_REPLAY:
1307 return kbase_replay_process(katom);
1308 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1309 return kbasep_soft_event_wait(katom);
1310 case BASE_JD_REQ_SOFT_EVENT_SET:
1311 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1313 case BASE_JD_REQ_SOFT_EVENT_RESET:
1314 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1316 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1318 int res = kbase_debug_copy(katom);
1321 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1324 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1325 return -EINVAL; /* Temporarily disabled */
1326 kbase_jit_allocate_process(katom);
1328 case BASE_JD_REQ_SOFT_JIT_FREE:
1329 return -EINVAL; /* Temporarily disabled */
1330 kbase_jit_free_process(katom);
1332 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1333 kbase_ext_res_process(katom, true);
1335 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1336 kbase_ext_res_process(katom, false);
1340 /* Atom is complete */
1344 void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1346 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1348 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1349 kbase_fence_cancel_wait(katom);
1352 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1353 kbasep_soft_event_cancel_job(katom);
1356 /* This soft-job doesn't support cancellation! */
1357 KBASE_DEBUG_ASSERT(0);
1361 int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1363 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1364 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1366 if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
1371 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1373 struct base_fence fence;
1376 if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1379 fd = kbase_stream_create_fence(fence.basep.stream_fd);
1383 katom->fence = sync_fence_fdget(fd);
1385 if (katom->fence == NULL) {
1386 /* The only way the fence can be NULL is if userspace closed it for us.
1387 * So we don't need to clear it up */
1390 fence.basep.fd = fd;
1391 if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
1392 katom->fence = NULL;
1398 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1400 struct base_fence fence;
1402 if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1405 /* Get a reference to the fence object */
1406 katom->fence = sync_fence_fdget(fence.basep.fd);
1407 if (katom->fence == NULL)
1411 #endif /* CONFIG_SYNC */
1412 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1413 return kbase_jit_allocate_prepare(katom);
1414 case BASE_JD_REQ_SOFT_REPLAY:
1415 case BASE_JD_REQ_SOFT_JIT_FREE:
1417 case BASE_JD_REQ_SOFT_EVENT_WAIT:
1418 case BASE_JD_REQ_SOFT_EVENT_SET:
1419 case BASE_JD_REQ_SOFT_EVENT_RESET:
1423 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1424 return kbase_debug_copy_prepare(katom);
1425 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1426 return kbase_ext_res_prepare(katom);
1427 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1428 return kbase_ext_res_prepare(katom);
1430 /* Unsupported soft-job */
1436 void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1438 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1439 case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1443 case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1444 /* If fence has not yet been signalled, do it now */
1446 kbase_fence_trigger(katom, katom->event_code ==
1447 BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1448 sync_fence_put(katom->fence);
1449 katom->fence = NULL;
1452 case BASE_JD_REQ_SOFT_FENCE_WAIT:
1453 /* Release the reference to the fence object */
1455 sync_fence_put(katom->fence);
1456 katom->fence = NULL;
1459 #endif /* CONFIG_SYNC */
1461 case BASE_JD_REQ_SOFT_DEBUG_COPY:
1462 kbase_debug_copy_finish(katom);
1464 case BASE_JD_REQ_SOFT_JIT_ALLOC:
1465 kbase_jit_allocate_finish(katom);
1467 case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1468 kbase_ext_res_finish(katom);
1470 case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1471 kbase_ext_res_finish(katom);
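/*
 * Re-run the soft jobs that were queued on suspended_soft_jobs_list while
 * the device was suspending (see kbase_dump_cpu_gpu_time()), completing
 * those that can now finish.
 */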
1476 void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1478 LIST_HEAD(local_suspended_soft_jobs);
1479 struct kbase_jd_atom *tmp_iter;
1480 struct kbase_jd_atom *katom_iter;
1481 struct kbasep_js_device_data *js_devdata;
1482 bool resched = false;
1484 KBASE_DEBUG_ASSERT(kbdev);
1486 js_devdata = &kbdev->js_data;
1488 /* Move out the entire list */
1489 mutex_lock(&js_devdata->runpool_mutex);
1490 list_splice_init(&js_devdata->suspended_soft_jobs_list,
1491 &local_suspended_soft_jobs);
1492 mutex_unlock(&js_devdata->runpool_mutex);
1495 * Each atom must be detached from the list and run separately -
1496 * it could be re-added to the old list, but this is unlikely
1498 list_for_each_entry_safe(katom_iter, tmp_iter,
1499 &local_suspended_soft_jobs, dep_item[1]) {
1500 struct kbase_context *kctx = katom_iter->kctx;
1502 mutex_lock(&kctx->jctx.lock);
1504 /* Remove from the global list */
1505 list_del(&katom_iter->dep_item[1]);
1506 /* Remove from the context's list of waiting soft jobs */
1507 kbasep_remove_waiting_soft_job(katom_iter);
1509 if (kbase_process_soft_job(katom_iter) == 0) {
1510 kbase_finish_soft_job(katom_iter);
1511 resched |= jd_done_nolock(katom_iter, NULL);
1513 KBASE_DEBUG_ASSERT((katom_iter->core_req &
1514 BASE_JD_REQ_SOFT_JOB_TYPE)
1515 != BASE_JD_REQ_SOFT_REPLAY);
1518 mutex_unlock(&kctx->jctx.lock);
1522 kbase_js_sched_all(kbdev);