drivers/gpu/arm/midgard/mali_kbase_softjobs.c (firefly-linux-kernel-4.4.55.git, commit 07b862546f80c8025f54372575b9dfd93e88b0f0)
1 /*
2  *
3  * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 #include <mali_kbase.h>
21
22 #if defined(CONFIG_DMA_SHARED_BUFFER)
23 #include <linux/dma-buf.h>
24 #include <asm/cacheflush.h>
25 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
26 #include <linux/dma-mapping.h>
27 #ifdef CONFIG_SYNC
28 #include "sync.h"
29 #include <linux/syscalls.h>
30 #include "mali_kbase_sync.h"
31 #endif
32 #include <mali_base_kernel.h>
33 #include <mali_kbase_hwaccess_time.h>
34 #include <mali_kbase_mem_linux.h>
35 #include <linux/version.h>
36 #include <linux/ktime.h>
37 #include <linux/pfn.h>
38 #include <linux/sched.h>
39
40 /* Mask to check cache alignment of data structures */
41 #define KBASE_CACHE_ALIGNMENT_MASK              ((1<<L1_CACHE_SHIFT)-1)
42
43 /**
44  * @file mali_kbase_softjobs.c
45  *
46  * This file implements the logic behind software-only jobs that are
47  * executed within the driver rather than being handed over to the GPU.
48  */
49
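/**
 * kbasep_add_waiting_soft_job() - Add an atom to the context's list of
 *                                 waiting soft jobs
 * @katom: The atom to add
 *
 * The list is protected by kctx->waiting_soft_jobs_lock.
 */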
50 void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
51 {
52         struct kbase_context *kctx = katom->kctx;
53         unsigned long lflags;
54
55         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
56         list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
57         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
58 }
59
60 void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
61 {
62         struct kbase_context *kctx = katom->kctx;
63         unsigned long lflags;
64
65         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
66         list_del(&katom->queue);
67         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
68 }
69
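/**
 * kbasep_add_waiting_with_timeout() - Add an atom to the waiting list and
 *                                     arm the soft job timeout timer
 * @katom: The atom to add
 *
 * Records the atom's start timestamp and, if the timer is not already
 * pending, schedules kctx->soft_job_timeout to fire after
 * soft_job_timeout_ms.
 */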
70 static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
71 {
72         struct kbase_context *kctx = katom->kctx;
73
74         /* Record the start time of this atom so we can cancel it at
75          * the right time.
76          */
77         katom->start_timestamp = ktime_get();
78
79         /* Add the atom to the waiting list before the timer is
80          * (re)started to make sure that it gets processed.
81          */
82         kbasep_add_waiting_soft_job(katom);
83
84         /* Arm the soft job timeout timer if it is not already running */
85         if (!timer_pending(&kctx->soft_job_timeout)) {
86                 int timeout_ms = atomic_read(
87                                 &kctx->kbdev->js_data.soft_job_timeout_ms);
88                 mod_timer(&kctx->soft_job_timeout,
89                           jiffies + msecs_to_jiffies(timeout_ms));
90         }
91 }
92
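/**
 * kbasep_read_soft_event_status() - Read the status byte of a soft event
 * @kctx:   Context owning the event
 * @evt:    GPU virtual address of the event status byte
 * @status: Output; receives the current status value
 *
 * Return: 0 on success, -EFAULT if the event address could not be mapped.
 */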
93 static int kbasep_read_soft_event_status(
94                 struct kbase_context *kctx, u64 evt, unsigned char *status)
95 {
96         unsigned char *mapped_evt;
97         struct kbase_vmap_struct map;
98
99         mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
100         if (!mapped_evt)
101                 return -EFAULT;
102
103         *status = *mapped_evt;
104
105         kbase_vunmap(kctx, &map);
106
107         return 0;
108 }
109
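/**
 * kbasep_write_soft_event_status() - Write the status byte of a soft event
 * @kctx:       Context owning the event
 * @evt:        GPU virtual address of the event status byte
 * @new_status: Either BASE_JD_SOFT_EVENT_SET or BASE_JD_SOFT_EVENT_RESET
 *
 * Return: 0 on success, -EINVAL for an invalid status value, -EFAULT if the
 * event address could not be mapped.
 */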
110 static int kbasep_write_soft_event_status(
111                 struct kbase_context *kctx, u64 evt, unsigned char new_status)
112 {
113         unsigned char *mapped_evt;
114         struct kbase_vmap_struct map;
115
116         if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
117             (new_status != BASE_JD_SOFT_EVENT_RESET))
118                 return -EINVAL;
119
120         mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
121         if (!mapped_evt)
122                 return -EFAULT;
123
124         *mapped_evt = new_status;
125
126         kbase_vunmap(kctx, &map);
127
128         return 0;
129 }
130
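/**
 * kbase_dump_cpu_gpu_time() - Process a BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME atom
 * @katom: The soft job atom
 *
 * Samples the CPU timestamp together with the GPU cycle counter and system
 * time, and writes the result to the user-supplied buffer at katom->jc. If
 * the device is suspending, the atom is instead queued on the list of
 * suspended soft jobs and processed again on resume.
 *
 * Return: 0 if the atom has completed, non-zero if it was deferred because
 * of suspend.
 */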
131 static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
132 {
133         struct kbase_vmap_struct map;
134         void *user_result;
135         struct timespec ts;
136         struct base_dump_cpu_gpu_counters data;
137         u64 system_time;
138         u64 cycle_counter;
139         u64 jc = katom->jc;
140         struct kbase_context *kctx = katom->kctx;
141         int pm_active_err;
142
143         memset(&data, 0, sizeof(data));
144
145         /* Take the PM active reference as late as possible - otherwise, it could
146          * delay suspend until we process the atom (which may be at the end of a
147          * long chain of dependencies). */
148         pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
149         if (pm_active_err) {
150                 struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
151
152                 /* We're suspended - queue this on the list of suspended jobs
153                  * Use dep_item[1], because dep_item[0] was previously in use
154                  * for 'waiting_soft_jobs'.
155                  */
156                 mutex_lock(&js_devdata->runpool_mutex);
157                 list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
158                 mutex_unlock(&js_devdata->runpool_mutex);
159
160                 /* Also add this atom to the list of waiting soft jobs */
161                 kbasep_add_waiting_soft_job(katom);
162
163                 return pm_active_err;
164         }
165
166         kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
167                                                                         &ts);
168
169         kbase_pm_context_idle(kctx->kbdev);
170
171         data.sec = ts.tv_sec;
172         data.usec = ts.tv_nsec / 1000;
173         data.system_time = system_time;
174         data.cycle_counter = cycle_counter;
175
176         /* Assume this atom will be cancelled until we know otherwise */
177         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
178
179         /* GPU_WR access is checked on the range used to return the result to
180          * userspace for the following reasons:
181          * - security: this is currently how imported user buffers are checked.
182          * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR */
183         user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
184         if (!user_result)
185                 return 0;
186
187         memcpy(user_result, &data, sizeof(data));
188
189         kbase_vunmap(kctx, &map);
190
191         /* Atom was fine - mark it as done */
192         katom->event_code = BASE_JD_EVENT_DONE;
193
194         return 0;
195 }
196
197 #ifdef CONFIG_SYNC
198
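/**
 * kbase_fence_trigger() - Signal the sync point backing a fence trigger atom
 * @katom:  The BASE_JD_REQ_SOFT_FENCE_TRIGGER atom
 * @result: Result to signal on the sync point (negative means error)
 *
 * The fence must contain exactly one sync point and that sync point must
 * belong to one of our own timelines, otherwise the atom is cancelled.
 *
 * Return: BASE_JD_EVENT_DONE on success, BASE_JD_EVENT_JOB_CANCELLED
 * otherwise.
 */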
199 static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
200 {
201         struct sync_pt *pt;
202         struct sync_timeline *timeline;
203
204 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
205         if (!list_is_singular(&katom->fence->pt_list_head)) {
206 #else
207         if (katom->fence->num_fences != 1) {
208 #endif
209                 /* Not exactly one item in the list - so it didn't (directly) come from us */
210                 return BASE_JD_EVENT_JOB_CANCELLED;
211         }
212
213 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
214         pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
215 #else
216         pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
217 #endif
218         timeline = sync_pt_parent(pt);
219
220         if (!kbase_sync_timeline_is_ours(timeline)) {
221                 /* Fence has a sync_pt which isn't ours! */
222                 return BASE_JD_EVENT_JOB_CANCELLED;
223         }
224
225         kbase_sync_signal_pt(pt, result);
226
227         sync_timeline_signal(timeline);
228
229         return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
230 }
231
232 static void kbase_fence_wait_worker(struct work_struct *data)
233 {
234         struct kbase_jd_atom *katom;
235         struct kbase_context *kctx;
236
237         katom = container_of(data, struct kbase_jd_atom, work);
238         kctx = katom->kctx;
239
240         mutex_lock(&kctx->jctx.lock);
241         kbasep_remove_waiting_soft_job(katom);
242         kbase_finish_soft_job(katom);
243         if (jd_done_nolock(katom, NULL))
244                 kbase_js_sched_all(kctx->kbdev);
245         mutex_unlock(&kctx->jctx.lock);
246 }
247
248 static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
249 {
250         struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
251         struct kbase_context *kctx;
252
253         KBASE_DEBUG_ASSERT(NULL != katom);
254
255         kctx = katom->kctx;
256
257         KBASE_DEBUG_ASSERT(NULL != kctx);
258
259         /* Propagate the fence status to the atom.
260          * If negative then cancel this atom and its dependencies.
261          */
262         if (kbase_fence_get_status(fence) < 0)
263                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
264
265         /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
266          *
267          * The issue is that we may signal the timeline while holding kctx->jctx.lock and
268          * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
269          */
270
271         KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
272         INIT_WORK(&katom->work, kbase_fence_wait_worker);
273         queue_work(kctx->jctx.job_done_wq, &katom->work);
274 }
275
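/**
 * kbase_fence_wait() - Start an asynchronous wait on a fence wait atom
 * @katom: The BASE_JD_REQ_SOFT_FENCE_WAIT atom
 *
 * Return: 0 if the fence was already signalled and the atom is complete,
 * 1 if completion will happen asynchronously (from the fence callback, or
 * from the queued worker if setting up the wait failed).
 */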
276 static int kbase_fence_wait(struct kbase_jd_atom *katom)
277 {
278         int ret;
279
280         KBASE_DEBUG_ASSERT(NULL != katom);
281         KBASE_DEBUG_ASSERT(NULL != katom->kctx);
282
283         sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
284
285         ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
286
287         if (ret == 1) {
288                 /* Already signalled */
289                 return 0;
290         }
291
292         if (ret < 0) {
293                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
294                 /* We should cause the dependent jobs in the bag to be failed;
295                  * to do this we schedule the work queue to complete this job */
296                 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
297                 INIT_WORK(&katom->work, kbase_fence_wait_worker);
298                 queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
299         }
300
301 #ifdef CONFIG_MALI_FENCE_DEBUG
302         /* The timeout code will add this job to the list of waiting soft jobs.
303          */
304         kbasep_add_waiting_with_timeout(katom);
305 #else
306         kbasep_add_waiting_soft_job(katom);
307 #endif
308
309         return 1;
310 }
311
312 static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
313 {
314         if (!katom) {
315                 pr_err("kbase_fence_cancel_wait: katom is NULL, nothing to cancel\n");
316                 return;
317         }
318
319         if (!katom->fence) {
320                 pr_info("kbase_fence_cancel_wait: katom->fence is NULL (possibly already released), finishing the soft job\n");
321                 /* The fence is already gone, so there is no wait left to
322                  * cancel. Returning here could leave the atom on
323                  * kctx->waiting_soft_jobs forever, so fall through to the
324                  * normal completion path, which removes it from the list
325                  * and completes it via jd_done_nolock().
326                  */
327                 goto finish_softjob;
328         }
329
330         if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
331                 /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
332                 return;
333         }
334
335         /* Wait was cancelled - zap the atoms */
336 finish_softjob:
337         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
338
339         kbasep_remove_waiting_soft_job(katom);
340         kbase_finish_soft_job(katom);
341
342         if (jd_done_nolock(katom, NULL))
343                 kbase_js_sched_all(katom->kctx->kbdev);
344 }
345 #endif /* CONFIG_SYNC */
346
347 static void kbasep_soft_event_complete_job(struct work_struct *work)
348 {
349         struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
350                         work);
351         struct kbase_context *kctx = katom->kctx;
352         int resched;
353
354         mutex_lock(&kctx->jctx.lock);
355         resched = jd_done_nolock(katom, NULL);
356         mutex_unlock(&kctx->jctx.lock);
357
358         if (resched)
359                 kbase_js_sched_all(kctx->kbdev);
360 }
361
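/**
 * kbasep_complete_triggered_soft_events() - Complete event waiters for @evt
 * @kctx: Context whose waiting soft jobs are scanned
 * @evt:  GPU virtual address of the soft event that has been set
 *
 * Every BASE_JD_REQ_SOFT_EVENT_WAIT atom waiting on @evt is removed from the
 * waiting list and queued for completion. The soft job timeout timer is
 * cancelled if no waiters remain.
 */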
362 void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
363 {
364         int cancel_timer = 1;
365         struct list_head *entry, *tmp;
366         unsigned long lflags;
367
368         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
369         list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
370                 struct kbase_jd_atom *katom = list_entry(
371                                 entry, struct kbase_jd_atom, queue);
372
373                 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
374                 case BASE_JD_REQ_SOFT_EVENT_WAIT:
375                         if (katom->jc == evt) {
376                                 list_del(&katom->queue);
377
378                                 katom->event_code = BASE_JD_EVENT_DONE;
379                                 INIT_WORK(&katom->work,
380                                           kbasep_soft_event_complete_job);
381                                 queue_work(kctx->jctx.job_done_wq,
382                                            &katom->work);
383                         } else {
384                                 /* There are still other waiting jobs, so we
385                                  * cannot cancel the timer yet.
386                                  */
387                                 cancel_timer = 0;
388                         }
389                         break;
390 #ifdef CONFIG_MALI_FENCE_DEBUG
391                 case BASE_JD_REQ_SOFT_FENCE_WAIT:
392                         /* Keep the timer running if fence debug is enabled and
393                          * there are waiting fence jobs.
394                          */
395                         cancel_timer = 0;
396                         break;
397 #endif
398                 }
399         }
400
401         if (cancel_timer)
402                 del_timer(&kctx->soft_job_timeout);
403         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
404 }
405
406 #ifdef CONFIG_MALI_FENCE_DEBUG
407 static char *kbase_fence_debug_status_string(int status)
408 {
409         if (status == 0)
410                 return "signaled";
411         else if (status > 0)
412                 return "active";
413         else
414                 return "error";
415 }
416
417 static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
418 {
419         struct kbase_context *kctx = katom->kctx;
420         struct device *dev = kctx->kbdev->dev;
421         int i;
422
423         for (i = 0; i < 2; i++) {
424                 struct kbase_jd_atom *dep;
425
426                 list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
427                         if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
428                             dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
429                                 continue;
430
431                         if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
432                                         == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
433                                 struct sync_fence *fence = dep->fence;
434                                 int status = kbase_fence_get_status(fence);
435
436                                 /* Found blocked trigger fence. */
437                                 dev_warn(dev,
438                                          "\tVictim trigger atom %d fence [%p] %s: %s\n",
439                                          kbase_jd_atom_id(kctx, dep),
440                                          fence, fence->name,
441                                          kbase_fence_debug_status_string(status));
442                         }
443
444                         kbase_fence_debug_check_atom(dep);
445                 }
446         }
447 }
448
449 static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
450 {
451         struct kbase_context *kctx = katom->kctx;
452         struct device *dev = katom->kctx->kbdev->dev;
453         struct sync_fence *fence = katom->fence;
454         int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
455         int status = kbase_fence_get_status(fence);
456         unsigned long lflags;
457
458         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
459
460         dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
461                  kctx->tgid, kctx->id,
462                  kbase_jd_atom_id(kctx, katom),
463                  fence, timeout_ms);
464         dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
465                  fence, fence->name,
466                  kbase_fence_debug_status_string(status));
467
468         /* Search for blocked trigger atoms */
469         kbase_fence_debug_check_atom(katom);
470
471         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
472
473         /* Dump out the full state of all the Android sync fences.
474          * The function sync_dump() isn't exported to modules, so force
475          * sync_fence_wait() to time out to trigger sync_dump().
476          */
477         sync_fence_wait(fence, 1);
478 }
479
480 struct kbase_fence_debug_work {
481         struct kbase_jd_atom *katom;
482         struct work_struct work;
483 };
484
485 static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
486 {
487         struct kbase_fence_debug_work *w = container_of(work,
488                         struct kbase_fence_debug_work, work);
489         struct kbase_jd_atom *katom = w->katom;
490         struct kbase_context *kctx = katom->kctx;
491
492         mutex_lock(&kctx->jctx.lock);
493         kbase_fence_debug_wait_timeout(katom);
494         mutex_unlock(&kctx->jctx.lock);
495
496         kfree(w);
497 }
498
499 static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
500 {
501         struct kbase_fence_debug_work *work;
502         struct kbase_context *kctx = katom->kctx;
503
504         /* Enqueue fence debug worker. Use job_done_wq to get
505          * debug print ordered with job completion.
506          */
507         work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
508         /* Ignore allocation failure. */
509         if (work) {
510                 work->katom = katom;
511                 INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
512                 queue_work(kctx->jctx.job_done_wq, &work->work);
513         }
514 }
515 #endif /* CONFIG_MALI_FENCE_DEBUG */
516
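/**
 * kbasep_soft_job_timeout_worker() - Timer callback for waiting soft jobs
 * @data: Context pointer, cast to unsigned long
 *
 * Cancels BASE_JD_REQ_SOFT_EVENT_WAIT atoms that have been waiting longer
 * than soft_job_timeout_ms and, with CONFIG_MALI_FENCE_DEBUG, dumps debug
 * information for fence waits that have timed out. The timer is restarted
 * if any atom is still within its timeout.
 */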
517 void kbasep_soft_job_timeout_worker(unsigned long data)
518 {
519         struct kbase_context *kctx = (struct kbase_context *)data;
520         u32 timeout_ms = (u32)atomic_read(
521                         &kctx->kbdev->js_data.soft_job_timeout_ms);
522         struct timer_list *timer = &kctx->soft_job_timeout;
523         ktime_t cur_time = ktime_get();
524         bool restarting = false;
525         unsigned long lflags;
526         struct list_head *entry, *tmp;
527
528         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
529         list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
530                 struct kbase_jd_atom *katom = list_entry(entry,
531                                 struct kbase_jd_atom, queue);
532                 s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
533                                         katom->start_timestamp));
534
535                 if (elapsed_time < (s64)timeout_ms) {
536                         restarting = true;
537                         continue;
538                 }
539
540                 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
541                 case BASE_JD_REQ_SOFT_EVENT_WAIT:
542                         /* Take it out of the list to ensure that it
543                          * will be cancelled in all cases
544                          */
545                         list_del(&katom->queue);
546
547                         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
548                         INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
549                         queue_work(kctx->jctx.job_done_wq, &katom->work);
550                         break;
551 #ifdef CONFIG_MALI_FENCE_DEBUG
552                 case BASE_JD_REQ_SOFT_FENCE_WAIT:
553                         kbase_fence_debug_timeout(katom);
554                         break;
555 #endif
556                 }
557         }
558
559         if (restarting)
560                 mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
561         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
562 }
563
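/**
 * kbasep_soft_event_wait() - Process a BASE_JD_REQ_SOFT_EVENT_WAIT atom
 * @katom: The soft job atom; katom->jc holds the event address
 *
 * Return: 0 if the event is already set (or its status could not be read, in
 * which case the atom is cancelled), 1 if the atom must wait for the event.
 */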
564 static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
565 {
566         struct kbase_context *kctx = katom->kctx;
567         unsigned char status;
568
569         /* The status of this soft-job is stored in jc */
570         if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
571                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
572                 return 0;
573         }
574
575         if (status == BASE_JD_SOFT_EVENT_SET)
576                 return 0; /* Event already set, nothing to do */
577
578         kbasep_add_waiting_with_timeout(katom);
579
580         return 1;
581 }
582
583 static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
584                                      unsigned char new_status)
585 {
586         /* Complete jobs waiting on the same event */
587         struct kbase_context *kctx = katom->kctx;
588
589         if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
590                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
591                 return;
592         }
593
594         if (new_status == BASE_JD_SOFT_EVENT_SET)
595                 kbasep_complete_triggered_soft_events(kctx, katom->jc);
596 }
597
598 /**
599  * kbase_soft_event_update() - Update soft event state
600  * @kctx: Pointer to context
601  * @event: Event to update
602  * @new_status: New status value of event
603  *
604  * Update the event, and wake up any atoms waiting for the event.
605  *
606  * Return: 0 on success, a negative error code on failure.
607  */
608 int kbase_soft_event_update(struct kbase_context *kctx,
609                              u64 event,
610                              unsigned char new_status)
611 {
612         int err = 0;
613
614         mutex_lock(&kctx->jctx.lock);
615
616         if (kbasep_write_soft_event_status(kctx, event, new_status)) {
617                 err = -ENOENT;
618                 goto out;
619         }
620
621         if (new_status == BASE_JD_SOFT_EVENT_SET)
622                 kbasep_complete_triggered_soft_events(kctx, event);
623
624 out:
625         mutex_unlock(&kctx->jctx.lock);
626
627         return err;
628 }
629
630 static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
631 {
632         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
633         if (jd_done_nolock(katom, NULL))
634                 kbase_js_sched_all(katom->kctx->kbdev);
635 }
636
637 struct kbase_debug_copy_buffer {
638         size_t size;
639         struct page **pages;
640         int nr_pages;
641         size_t offset;
642         /* To find the enclosing memory region */
643         u64 gpu_addr;
644
645         struct page **extres_pages;
646         int nr_extres_pages;
647 };
648
649 static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
650 {
651         struct page **pages = buffer->extres_pages;
652         int nr_pages = buffer->nr_extres_pages;
653
654         if (pages) {
655                 int i;
656
657                 for (i = 0; i < nr_pages; i++) {
658                         struct page *pg = pages[i];
659
660                         if (pg)
661                                 put_page(pg);
662                 }
663                 kfree(pages);
664         }
665 }
666
667 static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
668 {
669         struct kbase_debug_copy_buffer *buffers =
670                         (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
671         unsigned int i;
672         unsigned int nr = katom->nr_extres;
673
674         if (!buffers)
675                 return;
676
677         kbase_gpu_vm_lock(katom->kctx);
678         for (i = 0; i < nr; i++) {
679                 int p;
680                 struct kbase_va_region *reg;
681
682                 reg = kbase_region_tracker_find_region_enclosing_address(
683                                 katom->kctx, buffers[i].gpu_addr);
684
685                 if (!buffers[i].pages)
686                         break;
687                 for (p = 0; p < buffers[i].nr_pages; p++) {
688                         struct page *pg = buffers[i].pages[p];
689
690                         if (pg)
691                                 put_page(pg);
692                 }
693                 kfree(buffers[i].pages);
694                 if (reg && reg->gpu_alloc) {
695                         switch (reg->gpu_alloc->type) {
696                         case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
697                         {
698                                 free_user_buffer(&buffers[i]);
699                                 break;
700                         }
701                         default:
702                                 /* Nothing to be done. */
703                                 break;
704                         }
705                         kbase_mem_phy_alloc_put(reg->gpu_alloc);
706                 }
707         }
708         kbase_gpu_vm_unlock(katom->kctx);
709         kfree(buffers);
710
711         katom->jc = 0;
712 }
713
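/**
 * kbase_debug_copy_prepare() - Validate and pin buffers for a debug copy job
 * @katom: The BASE_JD_REQ_SOFT_DEBUG_COPY atom
 *
 * Copies the array of base_jd_debug_copy_buffer structures from user space,
 * pins the destination user pages and takes references on the external
 * resources that will be copied from. On success katom->jc points to the
 * kernel-side kbase_debug_copy_buffer array.
 *
 * Return: 0 on success, a negative error code otherwise.
 */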
714 static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
715 {
716         struct kbase_debug_copy_buffer *buffers;
717         struct base_jd_debug_copy_buffer *user_buffers = NULL;
718         unsigned int i;
719         unsigned int nr = katom->nr_extres;
720         int ret = 0;
721         void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
722
723         if (!user_structs)
724                 return -EINVAL;
725
726         buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
727         if (!buffers) {
728                 ret = -ENOMEM;
729                 katom->jc = 0;
730                 goto out_cleanup;
731         }
732         katom->jc = (u64)(uintptr_t)buffers;
733
734         user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
735
736         if (!user_buffers) {
737                 ret = -ENOMEM;
738                 goto out_cleanup;
739         }
740
741         ret = copy_from_user(user_buffers, user_structs,
742                         sizeof(*user_buffers)*nr);
743         if (ret)
744                 goto out_cleanup;
745
746         for (i = 0; i < nr; i++) {
747                 u64 addr = user_buffers[i].address;
748                 u64 page_addr = addr & PAGE_MASK;
749                 u64 end_page_addr = addr + user_buffers[i].size - 1;
750                 u64 last_page_addr = end_page_addr & PAGE_MASK;
751                 int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
752                 int pinned_pages;
753                 struct kbase_va_region *reg;
754                 struct base_external_resource user_extres;
755
756                 if (!addr)
757                         continue;
758
759                 buffers[i].nr_pages = nr_pages;
760                 buffers[i].offset = addr & ~PAGE_MASK;
761                 if (buffers[i].offset >= PAGE_SIZE) {
762                         ret = -EINVAL;
763                         goto out_cleanup;
764                 }
765                 buffers[i].size = user_buffers[i].size;
766
767                 buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
768                                 GFP_KERNEL);
769                 if (!buffers[i].pages) {
770                         ret = -ENOMEM;
771                         goto out_cleanup;
772                 }
773
774                 pinned_pages = get_user_pages_fast(page_addr,
775                                         nr_pages,
776                                         1, /* Write */
777                                         buffers[i].pages);
778                 if (pinned_pages < 0) {
779                         ret = pinned_pages;
780                         goto out_cleanup;
781                 }
782                 if (pinned_pages != nr_pages) {
783                         ret = -EINVAL;
784                         goto out_cleanup;
785                 }
786
787                 user_extres = user_buffers[i].extres;
788                 if (user_extres.ext_resource == 0ULL) {
789                         ret = -EINVAL;
790                         goto out_cleanup;
791                 }
792
793                 buffers[i].gpu_addr = user_extres.ext_resource &
794                         ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
795                 kbase_gpu_vm_lock(katom->kctx);
796                 reg = kbase_region_tracker_find_region_enclosing_address(
797                                 katom->kctx, buffers[i].gpu_addr);
798
799                 if (NULL == reg || NULL == reg->cpu_alloc ||
800                                 (reg->flags & KBASE_REG_FREE)) {
801                         ret = -EINVAL;
802                         goto out_unlock;
803                 }
804                 kbase_mem_phy_alloc_get(reg->gpu_alloc);
805
806                 buffers[i].nr_extres_pages = reg->nr_pages;
807                 if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
808                         dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not of the same size as the external resource to copy.\n");
809
810                 switch (reg->gpu_alloc->type) {
811                 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
812                 {
813                         struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
814                         unsigned long nr_pages =
815                                 alloc->imported.user_buf.nr_pages;
816
817                         if (alloc->imported.user_buf.mm != current->mm) {
818                                 ret = -EINVAL;
819                                 goto out_unlock;
820                         }
821                         buffers[i].extres_pages = kcalloc(nr_pages,
822                                         sizeof(struct page *), GFP_KERNEL);
823                         if (!buffers[i].extres_pages) {
824                                 ret = -ENOMEM;
825                                 goto out_unlock;
826                         }
827
828                         ret = get_user_pages_fast(
829                                         alloc->imported.user_buf.address,
830                                         nr_pages, 0,
831                                         buffers[i].extres_pages);
832                         if (ret != nr_pages)
833                                 goto out_unlock;
834                         ret = 0;
835                         break;
836                 }
837                 case KBASE_MEM_TYPE_IMPORTED_UMP:
838                 {
839                         dev_warn(katom->kctx->kbdev->dev,
840                                         "UMP is not supported for debug_copy jobs\n");
841                         ret = -EINVAL;
842                         goto out_unlock;
843                 }
844                 default:
845                         /* Nothing to be done. */
846                         break;
847                 }
848                 kbase_gpu_vm_unlock(katom->kctx);
849         }
850         kfree(user_buffers);
851
852         return ret;
853
854 out_unlock:
855         kbase_gpu_vm_unlock(katom->kctx);
856
857 out_cleanup:
858         kfree(buffers);
859         kfree(user_buffers);
860
861         /* Frees allocated memory for kbase_debug_copy_job struct, including
862          * members, and sets jc to 0 */
863         kbase_debug_copy_finish(katom);
864         return ret;
865 }
866
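/**
 * kbase_mem_copy_from_extres_page() - Copy one external resource page into
 *                                     the (possibly unaligned) target buffer
 * @kctx:           Context owning the buffers
 * @extres_page:    Kernel mapping of the source page
 * @pages:          Pinned destination pages
 * @nr_pages:       Number of destination pages
 * @target_page_nr: In/out index of the current destination page
 * @offset:         Offset of the destination buffer within its pages
 * @to_copy:        In/out number of bytes still to copy
 *
 * Because the destination buffer need not be page aligned, a single source
 * page may span two destination pages.
 */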
867 static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
868                 void *extres_page, struct page **pages, unsigned int nr_pages,
869                 unsigned int *target_page_nr, size_t offset, size_t *to_copy)
870 {
871         void *target_page = kmap(pages[*target_page_nr]);
872         size_t chunk = PAGE_SIZE-offset;
873
874         if (!target_page) {
875                 *target_page_nr += 1;
876                 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
877                 return;
878         }
879
880         chunk = min(chunk, *to_copy);
881
882         memcpy(target_page + offset, extres_page, chunk);
883         *to_copy -= chunk;
884
885         kunmap(pages[*target_page_nr]);
886
887         *target_page_nr += 1;
888         if (*target_page_nr >= nr_pages)
889                 return;
890
891         target_page = kmap(pages[*target_page_nr]);
892         if (!target_page) {
893                 *target_page_nr += 1;
894                 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
895                 return;
896         }
897
898         KBASE_DEBUG_ASSERT(target_page);
899
900         chunk = min(offset, *to_copy);
901         memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
902         *to_copy -= chunk;
903
904         kunmap(pages[*target_page_nr]);
905 }
906
907 static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
908                 struct kbase_debug_copy_buffer *buf_data)
909 {
910         unsigned int i;
911         unsigned int target_page_nr = 0;
912         struct kbase_va_region *reg;
913         struct page **pages = buf_data->pages;
914         u64 offset = buf_data->offset;
915         size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
916         size_t to_copy = min(extres_size, buf_data->size);
917         int ret = 0;
918
919         KBASE_DEBUG_ASSERT(pages != NULL);
920
921         kbase_gpu_vm_lock(kctx);
922         reg = kbase_region_tracker_find_region_enclosing_address(
923                         kctx, buf_data->gpu_addr);
924
925         if (!reg) {
926                 ret = -EINVAL;
927                 goto out_unlock;
928         }
929
930         switch (reg->gpu_alloc->type) {
931         case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
932         {
933                 for (i = 0; i < buf_data->nr_extres_pages; i++) {
934                         struct page *pg = buf_data->extres_pages[i];
935                         void *extres_page = kmap(pg);
936
937                         if (extres_page)
938                                 kbase_mem_copy_from_extres_page(kctx,
939                                                 extres_page, pages,
940                                                 buf_data->nr_pages,
941                                                 &target_page_nr,
942                                                 offset, &to_copy);
943
944                         kunmap(pg);
945                         if (target_page_nr >= buf_data->nr_pages)
946                                 break;
947                 }
948                 break;
949         }
951 #ifdef CONFIG_DMA_SHARED_BUFFER
952         case KBASE_MEM_TYPE_IMPORTED_UMM: {
953                 struct dma_buf *dma_buf = reg->gpu_alloc->imported.umm.dma_buf;
954
955                 KBASE_DEBUG_ASSERT(dma_buf != NULL);
956                 KBASE_DEBUG_ASSERT(dma_buf->size ==
957                                    buf_data->nr_extres_pages * PAGE_SIZE);
958
959                 ret = dma_buf_begin_cpu_access(dma_buf,
960 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
961                                 0, buf_data->nr_extres_pages*PAGE_SIZE,
962 #endif
963                                 DMA_FROM_DEVICE);
964                 if (ret)
965                         goto out_unlock;
966
967                 for (i = 0; i < buf_data->nr_extres_pages; i++) {
968
969                         void *extres_page = dma_buf_kmap(dma_buf, i);
970
971                         if (extres_page)
972                                 kbase_mem_copy_from_extres_page(kctx,
973                                                 extres_page, pages,
974                                                 buf_data->nr_pages,
975                                                 &target_page_nr,
976                                                 offset, &to_copy);
977
978                         dma_buf_kunmap(dma_buf, i, extres_page);
979                         if (target_page_nr >= buf_data->nr_pages)
980                                 break;
981                 }
982                 dma_buf_end_cpu_access(dma_buf,
983 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
984                                 0, buf_data->nr_extres_pages*PAGE_SIZE,
985 #endif
986                                 DMA_FROM_DEVICE);
987                 break;
988         }
989 #endif
990         default:
991                 ret = -EINVAL;
992         }
993 out_unlock:
994         kbase_gpu_vm_unlock(kctx);
995         return ret;
996
997 }
998
999 static int kbase_debug_copy(struct kbase_jd_atom *katom)
1000 {
1001         struct kbase_debug_copy_buffer *buffers =
1002                         (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
1003         unsigned int i;
1004
1005         for (i = 0; i < katom->nr_extres; i++) {
1006                 int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
1007
1008                 if (res)
1009                         return res;
1010         }
1011
1012         return 0;
1013 }
1014
1015 static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
1016 {
1017         __user void *data = (__user void *)(uintptr_t) katom->jc;
1018         struct base_jit_alloc_info *info;
1019         int ret;
1020
1021         /* Fail the job if there is no info structure */
1022         if (!data) {
1023                 ret = -EINVAL;
1024                 goto fail;
1025         }
1026
1027         /* Copy the information for safe access and future storage */
1028         info = kzalloc(sizeof(*info), GFP_KERNEL);
1029         if (!info) {
1030                 ret = -ENOMEM;
1031                 goto fail;
1032         }
1033
1034         if (copy_from_user(info, data, sizeof(*info)) != 0) {
1035                 ret = -EINVAL;
1036                 goto free_info;
1037         }
1038
1039         /* If the ID is zero then fail the job */
1040         if (info->id == 0) {
1041                 ret = -EINVAL;
1042                 goto free_info;
1043         }
1044
1045         /* Sanity check that the PA fits within the VA */
1046         if (info->va_pages < info->commit_pages) {
1047                 ret = -EINVAL;
1048                 goto free_info;
1049         }
1050
1051         /* Ensure the GPU address is correctly aligned */
1052         if ((info->gpu_alloc_addr & 0x7) != 0) {
1053                 ret = -EINVAL;
1054                 goto free_info;
1055         }
1056
1057         /* Replace the user pointer with our kernel allocated info structure */
1058         katom->jc = (u64)(uintptr_t) info;
1059
1060         /*
1061          * Note:
1062          * The provided info->gpu_alloc_addr isn't validated here as
1063          * userland can cache allocations which means that even
1064          * though the region is valid it doesn't represent the
1065          * same thing it used to.
1066          *
1067          * Complete validation of va_pages, commit_pages and extent
1068          * isn't done here as it will be done during the call to
1069          * kbase_mem_alloc.
1070          */
1071         return 0;
1072
1073 free_info:
1074         kfree(info);
1075 fail:
1076         katom->jc = 0;
1077         return ret;
1078 }
1079
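/**
 * kbase_jit_allocate_process() - Process a BASE_JD_REQ_SOFT_JIT_ALLOC atom
 * @katom: The soft job atom; katom->jc points to the copied
 *         base_jit_alloc_info
 *
 * Creates the JIT allocation, binds it to the user-provided ID and writes
 * the allocation's GPU address back to info->gpu_alloc_addr. On failure the
 * atom's event code is set accordingly.
 */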
1080 static void kbase_jit_allocate_process(struct kbase_jd_atom *katom)
1081 {
1082         struct kbase_context *kctx = katom->kctx;
1083         struct base_jit_alloc_info *info;
1084         struct kbase_va_region *reg;
1085         struct kbase_vmap_struct mapping;
1086         u64 *ptr;
1087
1088         info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1089
1090         /* The JIT ID is still in use so fail the allocation */
1091         if (kctx->jit_alloc[info->id]) {
1092                 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1093                 return;
1094         }
1095
1096         /*
1097          * Mark the allocation so we know it's in use even if the
1098          * allocation itself fails.
1099          */
1100         kctx->jit_alloc[info->id] = (struct kbase_va_region *) -1;
1101
1102         /* Create a JIT allocation */
1103         reg = kbase_jit_allocate(kctx, info);
1104         if (!reg) {
1105                 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1106                 return;
1107         }
1108
1109         /*
1110          * Write the address of the JIT allocation to the user provided
1111          * GPU allocation.
1112          */
1113         ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
1114                         &mapping);
1115         if (!ptr) {
1116                 /*
1117                  * Leave the allocation "live" as the JIT free job will be
1118                  * submitted anyway.
1119                  */
1120                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1121                 return;
1122         }
1123
1124         *ptr = reg->start_pfn << PAGE_SHIFT;
1125         kbase_vunmap(kctx, &mapping);
1126
1127         katom->event_code = BASE_JD_EVENT_DONE;
1128
1129         /*
1130          * Bind it to the user provided ID. Do this last so we can check for
1131          * the JIT free racing this JIT alloc job.
1132          */
1133         kctx->jit_alloc[info->id] = reg;
1134 }
1135
1136 static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
1137 {
1138         struct base_jit_alloc_info *info;
1139
1140         info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1141         /* Free the info structure */
1142         kfree(info);
1143 }
1144
1145 static void kbase_jit_free_process(struct kbase_jd_atom *katom)
1146 {
1147         struct kbase_context *kctx = katom->kctx;
1148         u8 id = (u8) katom->jc;
1149
1150         /*
1151          * If the ID is zero or it is not in use yet then fail the job.
1152          */
1153         if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
1154                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1155                 return;
1156         }
1157
1158         /*
1159          * If the ID is valid but the allocation request failed still succeed
1160          * this soft job but don't try and free the allocation.
1161          */
1162         if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
1163                 kbase_jit_free(kctx, kctx->jit_alloc[id]);
1164
1165         kctx->jit_alloc[id] = NULL;
1166 }
1167
1168 static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1169 {
1170         __user struct base_external_resource_list *user_ext_res;
1171         struct base_external_resource_list *ext_res;
1172         u64 count = 0;
1173         size_t copy_size;
1174         int ret;
1175
1176         user_ext_res = (__user struct base_external_resource_list *)
1177                         (uintptr_t) katom->jc;
1178
1179         /* Fail the job if there is no info structure */
1180         if (!user_ext_res) {
1181                 ret = -EINVAL;
1182                 goto fail;
1183         }
1184
1185         if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
1186                 ret = -EINVAL;
1187                 goto fail;
1188         }
1189
1190         /* Is the number of external resources in range? */
1191         if (!count || count > BASE_EXT_RES_COUNT_MAX) {
1192                 ret = -EINVAL;
1193                 goto fail;
1194         }
1195
1196         /* Copy the information for safe access and future storage */
1197         copy_size = sizeof(*ext_res);
1198         copy_size += sizeof(struct base_external_resource) * (count - 1);
1199         ext_res = kzalloc(copy_size, GFP_KERNEL);
1200         if (!ext_res) {
1201                 ret = -ENOMEM;
1202                 goto fail;
1203         }
1204
1205         if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
1206                 ret = -EINVAL;
1207                 goto free_info;
1208         }
1209
1210         /*
1211          * Overwrite the count with the first value in case it was changed
1212          * after the fact.
1213          */
1214         ext_res->count = count;
1215
1216         /*
1217          * Replace the user pointer with our kernel allocated
1218          * ext_res structure.
1219          */
1220         katom->jc = (u64)(uintptr_t) ext_res;
1221
1222         return 0;
1223
1224 free_info:
1225         kfree(ext_res);
1226 fail:
1227         return ret;
1228 }
1229
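/**
 * kbase_ext_res_process() - Map or unmap the atom's external resources
 * @katom: The EXT_RES_MAP/EXT_RES_UNMAP soft job atom
 * @map:   True to acquire (map) the resources, false to release (unmap) them
 *
 * On a failed map, every resource acquired so far is released again and the
 * atom is marked invalid.
 */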
1230 static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1231 {
1232         struct base_external_resource_list *ext_res;
1233         int i;
1234         bool failed = false;
1235
1236         ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1237         if (!ext_res)
1238                 goto failed_jc;
1239
1240         kbase_gpu_vm_lock(katom->kctx);
1241
1242         for (i = 0; i < ext_res->count; i++) {
1243                 u64 gpu_addr;
1244
1245                 gpu_addr = ext_res->ext_res[i].ext_resource &
1246                                 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1247                 if (map) {
1248                         if (!kbase_sticky_resource_acquire(katom->kctx,
1249                                         gpu_addr))
1250                                 goto failed_loop;
1251                 } else
1252                         if (!kbase_sticky_resource_release(katom->kctx, NULL,
1253                                         gpu_addr))
1254                                 failed = true;
1255         }
1256
1257         /*
1258          * In the unmap case we continue unmapping the remaining resources
1259          * even if one of them fails, but we always report failure if _any_
1260          * unmap request fails.
1261          */
1262         if (failed)
1263                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1264         else
1265                 katom->event_code = BASE_JD_EVENT_DONE;
1266
1267         kbase_gpu_vm_unlock(katom->kctx);
1268
1269         return;
1270
1271 failed_loop:
1272         while (i-- > 0) {
1273                 u64 gpu_addr;
1274
1275                 gpu_addr = ext_res->ext_res[i].ext_resource &
1276                                 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1277
1278                 kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
1279         }
1280
1281         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1282         kbase_gpu_vm_unlock(katom->kctx);
1283
1284 failed_jc:
1285         return;
1286 }
1287
1288 static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1289 {
1290         struct base_external_resource_list *ext_res;
1291
1292         ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1293         /* Free the info structure */
1294         kfree(ext_res);
1295 }
1296
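/**
 * kbase_process_soft_job() - Execute a soft job atom
 * @katom: The soft job atom to process
 *
 * Dispatches on the soft job type in katom->core_req.
 *
 * Return: 0 if the atom has completed, non-zero if it is blocked waiting
 * (e.g. on a fence or soft event) and will be completed asynchronously.
 */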
1297 int kbase_process_soft_job(struct kbase_jd_atom *katom)
1298 {
1299         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1300         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1301                 return kbase_dump_cpu_gpu_time(katom);
1302 #ifdef CONFIG_SYNC
1303         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1304                 KBASE_DEBUG_ASSERT(katom->fence != NULL);
1305                 katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1306                 /* Release the reference as we don't need it any more */
1307                 sync_fence_put(katom->fence);
1308                 katom->fence = NULL;
1309                 break;
1310         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1311                 return kbase_fence_wait(katom);
1312 #endif                          /* CONFIG_SYNC */
1313         case BASE_JD_REQ_SOFT_REPLAY:
1314                 return kbase_replay_process(katom);
1315         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1316                 return kbasep_soft_event_wait(katom);
1317         case BASE_JD_REQ_SOFT_EVENT_SET:
1318                 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1319                 break;
1320         case BASE_JD_REQ_SOFT_EVENT_RESET:
1321                 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1322                 break;
1323         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1324         {
1325                 int res = kbase_debug_copy(katom);
1326
1327                 if (res)
1328                         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1329                 break;
1330         }
1331         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1332                 kbase_jit_allocate_process(katom);
1333                 break;
1334         case BASE_JD_REQ_SOFT_JIT_FREE:
1335                 kbase_jit_free_process(katom);
1336                 break;
1337         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1338                 kbase_ext_res_process(katom, true);
1339                 break;
1340         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1341                 kbase_ext_res_process(katom, false);
1342                 break;
1343         }
1344
1345         /* Atom is complete */
1346         return 0;
1347 }
1348
1349 void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1350 {
1351         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1352 #ifdef CONFIG_SYNC
1353         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1354                 kbase_fence_cancel_wait(katom);
1355                 break;
1356 #endif
1357         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1358                 kbasep_soft_event_cancel_job(katom);
1359                 break;
1360         default:
1361                 /* This soft-job doesn't support cancellation! */
1362                 KBASE_DEBUG_ASSERT(0);
1363         }
1364 }
1365
1366 int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1367 {
1368         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1369         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1370                 {
1371                         if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
1372                                 return -EINVAL;
1373                 }
1374                 break;
1375 #ifdef CONFIG_SYNC
1376         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1377                 {
1378                         struct base_fence fence;
1379                         int fd;
1380
1381                         if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1382                                 return -EINVAL;
1383
1384                         fd = kbase_stream_create_fence(fence.basep.stream_fd);
1385                         if (fd < 0)
1386                                 return -EINVAL;
1387
1388                         katom->fence = sync_fence_fdget(fd);
1389
1390                         if (katom->fence == NULL) {
1391                                 /* The only way the fence can be NULL is if userspace closed it for us.
1392                                  * So we don't need to clean it up */
1393                                 return -EINVAL;
1394                         }
1395                         fence.basep.fd = fd;
1396                         if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
1397                                 katom->fence = NULL;
1398                                 sys_close(fd);
1399                                 return -EINVAL;
1400                         }
1401                 }
1402                 break;
1403         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1404                 {
1405                         struct base_fence fence;
1406
1407                         if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1408                                 return -EINVAL;
1409
1410                         /* Get a reference to the fence object */
1411                         katom->fence = sync_fence_fdget(fence.basep.fd);
1412                         if (katom->fence == NULL)
1413                                 return -EINVAL;
1414                 }
1415                 break;
1416 #endif                          /* CONFIG_SYNC */
1417         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1418                 return kbase_jit_allocate_prepare(katom);
1419         case BASE_JD_REQ_SOFT_REPLAY:
1420         case BASE_JD_REQ_SOFT_JIT_FREE:
1421                 break;
1422         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1423         case BASE_JD_REQ_SOFT_EVENT_SET:
1424         case BASE_JD_REQ_SOFT_EVENT_RESET:
1425                 if (katom->jc == 0)
1426                         return -EINVAL;
1427                 break;
1428         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1429                 return kbase_debug_copy_prepare(katom);
1430         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1431                 return kbase_ext_res_prepare(katom);
1432         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1433                 return kbase_ext_res_prepare(katom);
1434         default:
1435                 /* Unsupported soft-job */
1436                 return -EINVAL;
1437         }
1438         return 0;
1439 }
1440
1441 void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1442 {
1443         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1444         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1445                 /* Nothing to do */
1446                 break;
1447 #ifdef CONFIG_SYNC
1448         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1449                 /* If fence has not yet been signalled, do it now */
1450                 if (katom->fence) {
1451                         kbase_fence_trigger(katom, katom->event_code ==
1452                                         BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1453                         sync_fence_put(katom->fence);
1454                         katom->fence = NULL;
1455                 }
1456                 break;
1457         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1458                 /* Release the reference to the fence object */
1459                 if (katom->fence) {
1460                         sync_fence_put(katom->fence);
1461                         katom->fence = NULL;
1462                 }
1463                 break;
1464 #endif                          /* CONFIG_SYNC */
1465
1466         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1467                 kbase_debug_copy_finish(katom);
1468                 break;
1469         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1470                 kbase_jit_allocate_finish(katom);
1471                 break;
1472         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1473                 kbase_ext_res_finish(katom);
1474                 break;
1475         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1476                 kbase_ext_res_finish(katom);
1477                 break;
1478         }
1479 }
1480
1481 void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1482 {
1483         LIST_HEAD(local_suspended_soft_jobs);
1484         struct kbase_jd_atom *tmp_iter;
1485         struct kbase_jd_atom *katom_iter;
1486         struct kbasep_js_device_data *js_devdata;
1487         bool resched = false;
1488
1489         KBASE_DEBUG_ASSERT(kbdev);
1490
1491         js_devdata = &kbdev->js_data;
1492
1493         /* Move out the entire list */
1494         mutex_lock(&js_devdata->runpool_mutex);
1495         list_splice_init(&js_devdata->suspended_soft_jobs_list,
1496                         &local_suspended_soft_jobs);
1497         mutex_unlock(&js_devdata->runpool_mutex);
1498
1499         /*
1500          * Each atom must be detached from the list and run separately -
1501          * it could be re-added to the old list, but this is unlikely.
1502          */
1503         list_for_each_entry_safe(katom_iter, tmp_iter,
1504                         &local_suspended_soft_jobs, dep_item[1]) {
1505                 struct kbase_context *kctx = katom_iter->kctx;
1506
1507                 mutex_lock(&kctx->jctx.lock);
1508
1509                 /* Remove from the global list */
1510                 list_del(&katom_iter->dep_item[1]);
1511                 /* Remove from the context's list of waiting soft jobs */
1512                 kbasep_remove_waiting_soft_job(katom_iter);
1513
1514                 if (kbase_process_soft_job(katom_iter) == 0) {
1515                         kbase_finish_soft_job(katom_iter);
1516                         resched |= jd_done_nolock(katom_iter, NULL);
1517                 } else {
1518                         KBASE_DEBUG_ASSERT((katom_iter->core_req &
1519                                         BASE_JD_REQ_SOFT_JOB_TYPE)
1520                                         != BASE_JD_REQ_SOFT_REPLAY);
1521                 }
1522
1523                 mutex_unlock(&kctx->jctx.lock);
1524         }
1525
1526         if (resched)
1527                 kbase_js_sched_all(kbdev);
1528 }