MALI: rockchip: upgrade midgard DDK to r14p0-01rel0
[firefly-linux-kernel-4.4.55.git] drivers/gpu/arm/midgard/mali_kbase_softjobs.c
1 /*
2  *
3  * (C) COPYRIGHT 2011-2016 ARM Limited. All rights reserved.
4  *
5  * This program is free software and is provided to you under the terms of the
6  * GNU General Public License version 2 as published by the Free Software
7  * Foundation, and any use by you of this program is subject to the terms
8  * of such GNU licence.
9  *
10  * A copy of the licence is included with the program, and can also be obtained
11  * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12  * Boston, MA  02110-1301, USA.
13  *
14  */
15
16
17
18
19
20 #include <mali_kbase.h>
21
22 #if defined(CONFIG_DMA_SHARED_BUFFER)
23 #include <linux/dma-buf.h>
24 #include <asm/cacheflush.h>
25 #endif /* defined(CONFIG_DMA_SHARED_BUFFER) */
26 #include <linux/dma-mapping.h>
27 #ifdef CONFIG_SYNC
28 #include "sync.h"
29 #include <linux/syscalls.h>
30 #include "mali_kbase_sync.h"
31 #endif
32 #include <mali_base_kernel.h>
33 #include <mali_kbase_hwaccess_time.h>
34 #include <mali_kbase_mem_linux.h>
35 #include <linux/version.h>
36 #include <linux/ktime.h>
37 #include <linux/pfn.h>
38 #include <linux/sched.h>
39
40 /* Mask to check cache alignment of data structures */
41 #define KBASE_CACHE_ALIGNMENT_MASK              ((1<<L1_CACHE_SHIFT)-1)
42
43 /**
44  * @file mali_kbase_softjobs.c
45  *
46  * This file implements the logic behind software only jobs that are
47  * executed within the driver rather than being handed over to the GPU.
48  */
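/*
 * The soft job types handled here include CPU/GPU timestamp dumps, sync
 * fence trigger and wait (CONFIG_SYNC), soft event wait/set/reset, replay,
 * debug copy, JIT allocate/free and external resource map/unmap; see
 * kbase_process_soft_job() below for the dispatch.
 */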
49
50 void kbasep_add_waiting_soft_job(struct kbase_jd_atom *katom)
51 {
52         struct kbase_context *kctx = katom->kctx;
53         unsigned long lflags;
54
55         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
56         list_add_tail(&katom->queue, &kctx->waiting_soft_jobs);
57         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
58 }
59
60 void kbasep_remove_waiting_soft_job(struct kbase_jd_atom *katom)
61 {
62         struct kbase_context *kctx = katom->kctx;
63         unsigned long lflags;
64
65         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
66         list_del(&katom->queue);
67         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
68 }
69
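/**
 * kbasep_add_waiting_with_timeout - Queue a soft job on the waiting list and
 *                                   arm the context's soft job timeout timer.
 * @katom: Soft job atom that is about to block.
 *
 * Records the atom's start time, adds it to kctx->waiting_soft_jobs and, if
 * the per-context timer is not already pending, schedules it to fire after
 * js_data.soft_job_timeout_ms milliseconds.
 */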
70 static void kbasep_add_waiting_with_timeout(struct kbase_jd_atom *katom)
71 {
72         struct kbase_context *kctx = katom->kctx;
73
74         /* Record the start time of this atom so we could cancel it at
75          * the right time.
76          */
77         katom->start_timestamp = ktime_get();
78
79         /* Add the atom to the waiting list before the timer is
80          * (re)started to make sure that it gets processed.
81          */
82         kbasep_add_waiting_soft_job(katom);
83
84         /* Schedule timeout of this atom after a period if it is not active */
85         if (!timer_pending(&kctx->soft_job_timeout)) {
86                 int timeout_ms = atomic_read(
87                                 &kctx->kbdev->js_data.soft_job_timeout_ms);
88                 mod_timer(&kctx->soft_job_timeout,
89                           jiffies + msecs_to_jiffies(timeout_ms));
90         }
91 }
92
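/*
 * The state of a soft event is a single status byte in GPU-mapped memory at
 * the address passed in evt (normally the atom's jc). The two helpers below
 * map that byte with kbase_vmap()/kbase_vunmap() to read or update it; the
 * only valid values are BASE_JD_SOFT_EVENT_SET and BASE_JD_SOFT_EVENT_RESET.
 */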
93 static int kbasep_read_soft_event_status(
94                 struct kbase_context *kctx, u64 evt, unsigned char *status)
95 {
96         unsigned char *mapped_evt;
97         struct kbase_vmap_struct map;
98
99         mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
100         if (!mapped_evt)
101                 return -EFAULT;
102
103         *status = *mapped_evt;
104
105         kbase_vunmap(kctx, &map);
106
107         return 0;
108 }
109
110 static int kbasep_write_soft_event_status(
111                 struct kbase_context *kctx, u64 evt, unsigned char new_status)
112 {
113         unsigned char *mapped_evt;
114         struct kbase_vmap_struct map;
115
116         if ((new_status != BASE_JD_SOFT_EVENT_SET) &&
117             (new_status != BASE_JD_SOFT_EVENT_RESET))
118                 return -EINVAL;
119
120         mapped_evt = kbase_vmap(kctx, evt, sizeof(*mapped_evt), &map);
121         if (!mapped_evt)
122                 return -EFAULT;
123
124         *mapped_evt = new_status;
125
126         kbase_vunmap(kctx, &map);
127
128         return 0;
129 }
130
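/**
 * kbase_dump_cpu_gpu_time - Process a BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME atom.
 * @katom: Atom whose jc points at a base_dump_cpu_gpu_counters structure.
 *
 * Samples the CPU timestamp, GPU system time and cycle counter while holding
 * a PM active reference and copies the results back to the user-visible
 * structure. If the GPU is suspending, the atom is parked on the suspended
 * soft jobs list and retried later by kbase_resume_suspended_soft_jobs().
 *
 * Return: 0 if the atom was processed (katom->event_code holds the outcome),
 * non-zero if it was queued because the GPU is suspending.
 */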
131 static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom)
132 {
133         struct kbase_vmap_struct map;
134         void *user_result;
135         struct timespec ts;
136         struct base_dump_cpu_gpu_counters data;
137         u64 system_time;
138         u64 cycle_counter;
139         u64 jc = katom->jc;
140         struct kbase_context *kctx = katom->kctx;
141         int pm_active_err;
142
143         memset(&data, 0, sizeof(data));
144
145         /* Take the PM active reference as late as possible - otherwise, it could
146          * delay suspend until we process the atom (which may be at the end of a
147          * long chain of dependencies) */
148         pm_active_err = kbase_pm_context_active_handle_suspend(kctx->kbdev, KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE);
149         if (pm_active_err) {
150                 struct kbasep_js_device_data *js_devdata = &kctx->kbdev->js_data;
151
152                 /* We're suspended - queue this on the list of suspended jobs
153                  * Use dep_item[1], because dep_item[0] was previously in use
154                  * for 'waiting_soft_jobs'.
155                  */
156                 mutex_lock(&js_devdata->runpool_mutex);
157                 list_add_tail(&katom->dep_item[1], &js_devdata->suspended_soft_jobs_list);
158                 mutex_unlock(&js_devdata->runpool_mutex);
159
160                 /* Also add this to the list of waiting soft jobs */
161                 kbasep_add_waiting_soft_job(katom);
162
163                 return pm_active_err;
164         }
165
166         kbase_backend_get_gpu_time(kctx->kbdev, &cycle_counter, &system_time,
167                                                                         &ts);
168
169         kbase_pm_context_idle(kctx->kbdev);
170
171         data.sec = ts.tv_sec;
172         data.usec = ts.tv_nsec / 1000;
173         data.system_time = system_time;
174         data.cycle_counter = cycle_counter;
175
176         /* Assume this atom will be cancelled until we know otherwise */
177         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
178
179         /* GPU_WR access is checked on the range for returning the result to
180          * userspace for the following reasons:
181          * - security, this is currently how imported user bufs are checked.
182          * - the userspace DDK is guaranteed to assume the region was mapped as GPU_WR */
183         user_result = kbase_vmap_prot(kctx, jc, sizeof(data), KBASE_REG_GPU_WR, &map);
184         if (!user_result)
185                 return 0;
186
187         memcpy(user_result, &data, sizeof(data));
188
189         kbase_vunmap(kctx, &map);
190
191         /* Atom was fine - mark it as done */
192         katom->event_code = BASE_JD_EVENT_DONE;
193
194         return 0;
195 }
196
197 #ifdef CONFIG_SYNC
198
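/**
 * kbase_fence_trigger - Signal the sync_pt backing a fence trigger soft job.
 * @katom:  The BASE_JD_REQ_SOFT_FENCE_TRIGGER atom.
 * @result: 0 to signal success, negative to signal the fence with an error.
 *
 * The fence must contain exactly one sync_pt and that sync_pt must belong to
 * a kbase sync timeline; otherwise the atom is treated as cancelled.
 *
 * Return: BASE_JD_EVENT_DONE on success, BASE_JD_EVENT_JOB_CANCELLED
 * otherwise.
 */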
199 static enum base_jd_event_code kbase_fence_trigger(struct kbase_jd_atom *katom, int result)
200 {
201         struct sync_pt *pt;
202         struct sync_timeline *timeline;
203
204 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
205         if (!list_is_singular(&katom->fence->pt_list_head)) {
206 #else
207         if (katom->fence->num_fences != 1) {
208 #endif
209                 /* Not exactly one item in the list - so it didn't (directly) come from us */
210                 return BASE_JD_EVENT_JOB_CANCELLED;
211         }
212
213 #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
214         pt = list_first_entry(&katom->fence->pt_list_head, struct sync_pt, pt_list);
215 #else
216         pt = container_of(katom->fence->cbs[0].sync_pt, struct sync_pt, base);
217 #endif
218         timeline = sync_pt_parent(pt);
219
220         if (!kbase_sync_timeline_is_ours(timeline)) {
221                 /* Fence has a sync_pt which isn't ours! */
222                 return BASE_JD_EVENT_JOB_CANCELLED;
223         }
224
225         kbase_sync_signal_pt(pt, result);
226
227         sync_timeline_signal(timeline);
228
229         return (result < 0) ? BASE_JD_EVENT_JOB_CANCELLED : BASE_JD_EVENT_DONE;
230 }
231
232 static void kbase_fence_wait_worker(struct work_struct *data)
233 {
234         struct kbase_jd_atom *katom;
235         struct kbase_context *kctx;
236
237         katom = container_of(data, struct kbase_jd_atom, work);
238         kctx = katom->kctx;
239
240         mutex_lock(&kctx->jctx.lock);
241         kbasep_remove_waiting_soft_job(katom);
242         kbase_finish_soft_job(katom);
243         if (jd_done_nolock(katom, NULL))
244                 kbase_js_sched_all(kctx->kbdev);
245         mutex_unlock(&kctx->jctx.lock);
246 }
247
248 static void kbase_fence_wait_callback(struct sync_fence *fence, struct sync_fence_waiter *waiter)
249 {
250         struct kbase_jd_atom *katom = container_of(waiter, struct kbase_jd_atom, sync_waiter);
251         struct kbase_context *kctx;
252
253         KBASE_DEBUG_ASSERT(NULL != katom);
254
255         kctx = katom->kctx;
256
257         KBASE_DEBUG_ASSERT(NULL != kctx);
258
259         /* Propagate the fence status to the atom.
260          * If negative then cancel this atom and its dependencies.
261          */
262         if (kbase_fence_get_status(fence) < 0)
263                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
264
265         /* To prevent a potential deadlock we schedule the work onto the job_done_wq workqueue
266          *
267          * The issue is that we may signal the timeline while holding kctx->jctx.lock and
268          * the callbacks are run synchronously from sync_timeline_signal. So we simply defer the work.
269          */
270
271         KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
272         INIT_WORK(&katom->work, kbase_fence_wait_worker);
273         queue_work(kctx->jctx.job_done_wq, &katom->work);
274 }
275
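/**
 * kbase_fence_wait - Start an asynchronous wait for a fence wait soft job.
 * @katom: The BASE_JD_REQ_SOFT_FENCE_WAIT atom.
 *
 * Return: 0 if the fence was already signalled and the atom can complete
 * immediately, 1 if the atom must wait for kbase_fence_wait_callback() (also
 * returned after an error, once work has been queued to fail the atom).
 */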
276 static int kbase_fence_wait(struct kbase_jd_atom *katom)
277 {
278         int ret;
279
280         KBASE_DEBUG_ASSERT(NULL != katom);
281         KBASE_DEBUG_ASSERT(NULL != katom->kctx);
282
283         sync_fence_waiter_init(&katom->sync_waiter, kbase_fence_wait_callback);
284
285         ret = sync_fence_wait_async(katom->fence, &katom->sync_waiter);
286
287         if (ret == 1) {
288                 /* Already signalled */
289                 return 0;
290         }
291
292         if (ret < 0) {
293                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
294                 /* We should cause the dependent jobs in the bag to be failed;
295                  * to do this we queue work to complete this job */
296                 KBASE_DEBUG_ASSERT(0 == object_is_on_stack(&katom->work));
297                 INIT_WORK(&katom->work, kbase_fence_wait_worker);
298                 queue_work(katom->kctx->jctx.job_done_wq, &katom->work);
299         }
300
301 #ifdef CONFIG_MALI_FENCE_DEBUG
302         /* The timeout code will add this job to the list of waiting soft jobs.
303          */
304         kbasep_add_waiting_with_timeout(katom);
305 #else
306         kbasep_add_waiting_soft_job(katom);
307 #endif
308
309         return 1;
310 }
311
312 static void kbase_fence_cancel_wait(struct kbase_jd_atom *katom)
313 {
314         if (!katom) {
315                 pr_err("katom is NULL, returning\n");
316                 return;
317         }
318 
319         if (!katom->fence) {
320                 pr_info("katom->fence is NULL, possibly released out of order; continuing with the unfinished steps\n");
321                 /*
322                  * If we returned here, could that result in an infinite loop?
323                  * Would we need to delete dep_item[0] from kctx->waiting_soft_jobs,
324                  * or does jd_done_nolock() move dep_item[0] to the completed job
325                  * list and then delete it? Fall through and finish the soft job.
326                  */
327                 goto finish_softjob;
328         }
329
330         if (sync_fence_cancel_async(katom->fence, &katom->sync_waiter) != 0) {
331                 /* The wait wasn't cancelled - leave the cleanup for kbase_fence_wait_callback */
332                 return;
333         }
334
335         /* Wait was cancelled - zap the atoms */
336 finish_softjob:
337         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
338
339         kbasep_remove_waiting_soft_job(katom);
340         kbase_finish_soft_job(katom);
341
342         if (jd_done_nolock(katom, NULL))
343                 kbase_js_sched_all(katom->kctx->kbdev);
344 }
345 #endif /* CONFIG_SYNC */
346
347 static void kbasep_soft_event_complete_job(struct work_struct *work)
348 {
349         struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
350                         work);
351         struct kbase_context *kctx = katom->kctx;
352         int resched;
353
354         mutex_lock(&kctx->jctx.lock);
355         resched = jd_done_nolock(katom, NULL);
356         mutex_unlock(&kctx->jctx.lock);
357
358         if (resched)
359                 kbase_js_sched_all(kctx->kbdev);
360 }
361
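/**
 * kbasep_complete_triggered_soft_events - Complete atoms waiting on a soft
 *                                         event that has been set.
 * @kctx: Context whose waiting soft jobs are scanned.
 * @evt:  GPU virtual address of the soft event that was set.
 *
 * Walks kctx->waiting_soft_jobs under waiting_soft_jobs_lock, queues
 * completion work for every BASE_JD_REQ_SOFT_EVENT_WAIT atom whose jc
 * matches @evt, and cancels the soft job timeout timer if no job that still
 * needs it remains on the list.
 */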
362 void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
363 {
364         int cancel_timer = 1;
365         struct list_head *entry, *tmp;
366         unsigned long lflags;
367
368         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
369         list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
370                 struct kbase_jd_atom *katom = list_entry(
371                                 entry, struct kbase_jd_atom, queue);
372
373                 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
374                 case BASE_JD_REQ_SOFT_EVENT_WAIT:
375                         if (katom->jc == evt) {
376                                 list_del(&katom->queue);
377
378                                 katom->event_code = BASE_JD_EVENT_DONE;
379                                 INIT_WORK(&katom->work,
380                                           kbasep_soft_event_complete_job);
381                                 queue_work(kctx->jctx.job_done_wq,
382                                            &katom->work);
383                         } else {
384                                 /* There are still other waiting jobs, we cannot
385                                  * cancel the timer yet.
386                                  */
387                                 cancel_timer = 0;
388                         }
389                         break;
390 #ifdef CONFIG_MALI_FENCE_DEBUG
391                 case BASE_JD_REQ_SOFT_FENCE_WAIT:
392                         /* Keep the timer running if fence debug is enabled and
393                          * there are waiting fence jobs.
394                          */
395                         cancel_timer = 0;
396                         break;
397 #endif
398                 }
399         }
400
401         if (cancel_timer)
402                 del_timer(&kctx->soft_job_timeout);
403         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
404 }
405
406 #ifdef CONFIG_MALI_FENCE_DEBUG
407 static char *kbase_fence_debug_status_string(int status)
408 {
409         if (status == 0)
410                 return "signaled";
411         else if (status > 0)
412                 return "active";
413         else
414                 return "error";
415 }
416
417 static void kbase_fence_debug_check_atom(struct kbase_jd_atom *katom)
418 {
419         struct kbase_context *kctx = katom->kctx;
420         struct device *dev = kctx->kbdev->dev;
421         int i;
422
423         for (i = 0; i < 2; i++) {
424                 struct kbase_jd_atom *dep;
425
426                 list_for_each_entry(dep, &katom->dep_head[i], dep_item[i]) {
427                         if (dep->status == KBASE_JD_ATOM_STATE_UNUSED ||
428                             dep->status == KBASE_JD_ATOM_STATE_COMPLETED)
429                                 continue;
430
431                         if ((dep->core_req & BASE_JD_REQ_SOFT_JOB_TYPE)
432                                         == BASE_JD_REQ_SOFT_FENCE_TRIGGER) {
433                                 struct sync_fence *fence = dep->fence;
434                                 int status = kbase_fence_get_status(fence);
435
436                                 /* Found blocked trigger fence. */
437                                 dev_warn(dev,
438                                          "\tVictim trigger atom %d fence [%p] %s: %s\n",
439                                          kbase_jd_atom_id(kctx, dep),
440                                          fence, fence->name,
441                                          kbase_fence_debug_status_string(status));
442                         }
443
444                         kbase_fence_debug_check_atom(dep);
445                 }
446         }
447 }
448
449 static void kbase_fence_debug_wait_timeout(struct kbase_jd_atom *katom)
450 {
451         struct kbase_context *kctx = katom->kctx;
452         struct device *dev = katom->kctx->kbdev->dev;
453         struct sync_fence *fence = katom->fence;
454         int timeout_ms = atomic_read(&kctx->kbdev->js_data.soft_job_timeout_ms);
455         int status = kbase_fence_get_status(fence);
456         unsigned long lflags;
457
458         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
459
460         dev_warn(dev, "ctx %d_%d: Atom %d still waiting for fence [%p] after %dms\n",
461                  kctx->tgid, kctx->id,
462                  kbase_jd_atom_id(kctx, katom),
463                  fence, timeout_ms);
464         dev_warn(dev, "\tGuilty fence [%p] %s: %s\n",
465                  fence, fence->name,
466                  kbase_fence_debug_status_string(status));
467
468         /* Search for blocked trigger atoms */
469         kbase_fence_debug_check_atom(katom);
470
471         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
472
473         /* Dump out the full state of all the Android sync fences.
474          * The function sync_dump() isn't exported to modules, so force
475          * sync_fence_wait() to time out to trigger sync_dump().
476          */
477         sync_fence_wait(fence, 1);
478 }
479
480 struct kbase_fence_debug_work {
481         struct kbase_jd_atom *katom;
482         struct work_struct work;
483 };
484
485 static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
486 {
487         struct kbase_fence_debug_work *w = container_of(work,
488                         struct kbase_fence_debug_work, work);
489         struct kbase_jd_atom *katom = w->katom;
490         struct kbase_context *kctx = katom->kctx;
491
492         mutex_lock(&kctx->jctx.lock);
493         kbase_fence_debug_wait_timeout(katom);
494         mutex_unlock(&kctx->jctx.lock);
495
496         kfree(w);
497 }
498
499 static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
500 {
501         struct kbase_fence_debug_work *work;
502         struct kbase_context *kctx = katom->kctx;
503
504         /* Enqueue fence debug worker. Use job_done_wq to get
505          * debug print ordered with job completion.
506          */
507         work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
508         /* Ignore allocation failure. */
509         if (work) {
510                 work->katom = katom;
511                 INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
512                 queue_work(kctx->jctx.job_done_wq, &work->work);
513         }
514 }
515 #endif /* CONFIG_MALI_FENCE_DEBUG */
516
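/**
 * kbasep_soft_job_timeout_worker - Timer callback for the per-context soft
 *                                  job timeout.
 * @data: The kbase_context whose waiting soft jobs are checked.
 *
 * Cancels every BASE_JD_REQ_SOFT_EVENT_WAIT atom that has been waiting
 * longer than soft_job_timeout_ms and, when CONFIG_MALI_FENCE_DEBUG is
 * enabled, dumps debug information for fence waits that have timed out.
 * The timer is re-armed while younger waiting jobs remain on the list.
 */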
517 void kbasep_soft_job_timeout_worker(unsigned long data)
518 {
519         struct kbase_context *kctx = (struct kbase_context *)data;
520         u32 timeout_ms = (u32)atomic_read(
521                         &kctx->kbdev->js_data.soft_job_timeout_ms);
522         struct timer_list *timer = &kctx->soft_job_timeout;
523         ktime_t cur_time = ktime_get();
524         bool restarting = false;
525         unsigned long lflags;
526         struct list_head *entry, *tmp;
527
528         spin_lock_irqsave(&kctx->waiting_soft_jobs_lock, lflags);
529         list_for_each_safe(entry, tmp, &kctx->waiting_soft_jobs) {
530                 struct kbase_jd_atom *katom = list_entry(entry,
531                                 struct kbase_jd_atom, queue);
532                 s64 elapsed_time = ktime_to_ms(ktime_sub(cur_time,
533                                         katom->start_timestamp));
534
535                 if (elapsed_time < (s64)timeout_ms) {
536                         restarting = true;
537                         continue;
538                 }
539
540                 switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
541                 case BASE_JD_REQ_SOFT_EVENT_WAIT:
542                         /* Take it out of the list to ensure that it
543                          * will be cancelled in all cases
544                          */
545                         list_del(&katom->queue);
546
547                         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
548                         INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
549                         queue_work(kctx->jctx.job_done_wq, &katom->work);
550                         break;
551 #ifdef CONFIG_MALI_FENCE_DEBUG
552                 case BASE_JD_REQ_SOFT_FENCE_WAIT:
553                         kbase_fence_debug_timeout(katom);
554                         break;
555 #endif
556                 }
557         }
558
559         if (restarting)
560                 mod_timer(timer, jiffies + msecs_to_jiffies(timeout_ms));
561         spin_unlock_irqrestore(&kctx->waiting_soft_jobs_lock, lflags);
562 }
563
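/**
 * kbasep_soft_event_wait - Process a BASE_JD_REQ_SOFT_EVENT_WAIT atom.
 * @katom: Atom whose jc holds the GPU address of the soft event to wait on.
 *
 * Return: 0 if the event is already set, or if its status could not be read
 * (in which case the atom is cancelled); 1 if the atom has been queued to
 * wait with a timeout.
 */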
564 static int kbasep_soft_event_wait(struct kbase_jd_atom *katom)
565 {
566         struct kbase_context *kctx = katom->kctx;
567         unsigned char status;
568
569         /* The status of this soft-job is stored in jc */
570         if (kbasep_read_soft_event_status(kctx, katom->jc, &status)) {
571                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
572                 return 0;
573         }
574
575         if (status == BASE_JD_SOFT_EVENT_SET)
576                 return 0; /* Event already set, nothing to do */
577
578         kbasep_add_waiting_with_timeout(katom);
579
580         return 1;
581 }
582
583 static void kbasep_soft_event_update_locked(struct kbase_jd_atom *katom,
584                                      unsigned char new_status)
585 {
586         /* Complete jobs waiting on the same event */
587         struct kbase_context *kctx = katom->kctx;
588
589         if (kbasep_write_soft_event_status(kctx, katom->jc, new_status) != 0) {
590                 katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
591                 return;
592         }
593
594         if (new_status == BASE_JD_SOFT_EVENT_SET)
595                 kbasep_complete_triggered_soft_events(kctx, katom->jc);
596 }
597
598 /**
599  * kbase_soft_event_update() - Update soft event state
600  * @kctx: Pointer to context
601  * @event: Event to update
602  * @new_status: New status value of event
603  *
604  * Update the event, and wake up any atoms waiting for the event.
605  *
606  * Return: 0 on success, a negative error code on failure.
607  */
608 int kbase_soft_event_update(struct kbase_context *kctx,
609                              u64 event,
610                              unsigned char new_status)
611 {
612         int err = 0;
613
614         mutex_lock(&kctx->jctx.lock);
615
616         if (kbasep_write_soft_event_status(kctx, event, new_status)) {
617                 err = -ENOENT;
618                 goto out;
619         }
620
621         if (new_status == BASE_JD_SOFT_EVENT_SET)
622                 kbasep_complete_triggered_soft_events(kctx, event);
623
624 out:
625         mutex_unlock(&kctx->jctx.lock);
626
627         return err;
628 }
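
/*
 * A minimal usage sketch for kbase_soft_event_update(); illustrative only,
 * assuming a valid kctx and a soft event at GPU virtual address evt:
 *
 *	if (kbase_soft_event_update(kctx, evt, BASE_JD_SOFT_EVENT_SET))
 *		dev_warn(kctx->kbdev->dev, "soft event update failed\n");
 */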
629
630 static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
631 {
632         katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
633         if (jd_done_nolock(katom, NULL))
634                 kbase_js_sched_all(katom->kctx->kbdev);
635 }
636
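/**
 * struct kbase_debug_copy_buffer - Per-buffer state for a debug copy soft job.
 * @size:            Size in bytes of the destination user buffer (upper bound
 *                   on the copy).
 * @pages:           Pinned pages backing the destination user buffer.
 * @nr_pages:        Number of pages in @pages.
 * @offset:          Offset of the destination buffer within its first page.
 * @gpu_alloc:       Physical allocation of the external resource to copy from.
 * @extres_pages:    Pinned source pages, used for user buffer imports.
 * @nr_extres_pages: Number of pages in @extres_pages.
 */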
637 struct kbase_debug_copy_buffer {
638         size_t size;
639         struct page **pages;
640         int nr_pages;
641         size_t offset;
642         struct kbase_mem_phy_alloc *gpu_alloc;
643
644         struct page **extres_pages;
645         int nr_extres_pages;
646 };
647
648 static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
649 {
650         struct page **pages = buffer->extres_pages;
651         int nr_pages = buffer->nr_extres_pages;
652
653         if (pages) {
654                 int i;
655
656                 for (i = 0; i < nr_pages; i++) {
657                         struct page *pg = pages[i];
658
659                         if (pg)
660                                 put_page(pg);
661                 }
662                 kfree(pages);
663         }
664 }
665
666 static void kbase_debug_copy_finish(struct kbase_jd_atom *katom)
667 {
668         struct kbase_debug_copy_buffer *buffers =
669                         (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
670         unsigned int i;
671         unsigned int nr = katom->nr_extres;
672
673         if (!buffers)
674                 return;
675
676         kbase_gpu_vm_lock(katom->kctx);
677         for (i = 0; i < nr; i++) {
678                 int p;
679                 struct kbase_mem_phy_alloc *gpu_alloc = buffers[i].gpu_alloc;
680
681                 if (!buffers[i].pages)
682                         break;
683                 for (p = 0; p < buffers[i].nr_pages; p++) {
684                         struct page *pg = buffers[i].pages[p];
685
686                         if (pg)
687                                 put_page(pg);
688                 }
689                 kfree(buffers[i].pages);
690                 if (gpu_alloc) {
691                         switch (gpu_alloc->type) {
692                         case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
693                         {
694                                 free_user_buffer(&buffers[i]);
695                                 break;
696                         }
697                         default:
698                                 /* Nothing to be done. */
699                                 break;
700                         }
701                         kbase_mem_phy_alloc_put(gpu_alloc);
702                 }
703         }
704         kbase_gpu_vm_unlock(katom->kctx);
705         kfree(buffers);
706
707         katom->jc = 0;
708 }
709
710 static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom)
711 {
712         struct kbase_debug_copy_buffer *buffers;
713         struct base_jd_debug_copy_buffer *user_buffers = NULL;
714         unsigned int i;
715         unsigned int nr = katom->nr_extres;
716         int ret = 0;
717         void __user *user_structs = (void __user *)(uintptr_t)katom->jc;
718
719         if (!user_structs)
720                 return -EINVAL;
721
722         buffers = kcalloc(nr, sizeof(*buffers), GFP_KERNEL);
723         if (!buffers) {
724                 ret = -ENOMEM;
725                 katom->jc = 0;
726                 goto out_cleanup;
727         }
728         katom->jc = (u64)(uintptr_t)buffers;
729
730         user_buffers = kmalloc_array(nr, sizeof(*user_buffers), GFP_KERNEL);
731
732         if (!user_buffers) {
733                 ret = -ENOMEM;
734                 goto out_cleanup;
735         }
736
737         ret = copy_from_user(user_buffers, user_structs,
738                         sizeof(*user_buffers)*nr);
739         if (ret)
740                 goto out_cleanup;
741
742         for (i = 0; i < nr; i++) {
743                 u64 addr = user_buffers[i].address;
744                 u64 page_addr = addr & PAGE_MASK;
745                 u64 end_page_addr = addr + user_buffers[i].size - 1;
746                 u64 last_page_addr = end_page_addr & PAGE_MASK;
747                 int nr_pages = (last_page_addr-page_addr)/PAGE_SIZE+1;
748                 int pinned_pages;
749                 struct kbase_va_region *reg;
750                 struct base_external_resource user_extres;
751
752                 if (!addr)
753                         continue;
754
755                 buffers[i].nr_pages = nr_pages;
756                 buffers[i].offset = addr & ~PAGE_MASK;
757                 if (buffers[i].offset >= PAGE_SIZE) {
758                         ret = -EINVAL;
759                         goto out_cleanup;
760                 }
761                 buffers[i].size = user_buffers[i].size;
762
763                 buffers[i].pages = kcalloc(nr_pages, sizeof(struct page *),
764                                 GFP_KERNEL);
765                 if (!buffers[i].pages) {
766                         ret = -ENOMEM;
767                         goto out_cleanup;
768                 }
769
770                 pinned_pages = get_user_pages_fast(page_addr,
771                                         nr_pages,
772                                         1, /* Write */
773                                         buffers[i].pages);
774                 if (pinned_pages < 0) {
775                         ret = pinned_pages;
776                         goto out_cleanup;
777                 }
778                 if (pinned_pages != nr_pages) {
779                         ret = -EINVAL;
780                         goto out_cleanup;
781                 }
782
783                 user_extres = user_buffers[i].extres;
784                 if (user_extres.ext_resource == 0ULL) {
785                         ret = -EINVAL;
786                         goto out_cleanup;
787                 }
788
789                 kbase_gpu_vm_lock(katom->kctx);
790                 reg = kbase_region_tracker_find_region_enclosing_address(
791                                 katom->kctx, user_extres.ext_resource &
792                                 ~BASE_EXT_RES_ACCESS_EXCLUSIVE);
793
794                 if (NULL == reg || NULL == reg->gpu_alloc ||
795                                 (reg->flags & KBASE_REG_FREE)) {
796                         ret = -EINVAL;
797                         goto out_unlock;
798                 }
799
800                 buffers[i].gpu_alloc = kbase_mem_phy_alloc_get(reg->gpu_alloc);
801                 buffers[i].nr_extres_pages = reg->nr_pages;
802
803                 if (reg->nr_pages*PAGE_SIZE != buffers[i].size)
804                         dev_warn(katom->kctx->kbdev->dev, "Copy buffer is not the same size as the external resource to copy.\n");
805
806                 switch (reg->gpu_alloc->type) {
807                 case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
808                 {
809                         struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
810                         unsigned long nr_pages =
811                                 alloc->imported.user_buf.nr_pages;
812
813                         if (alloc->imported.user_buf.mm != current->mm) {
814                                 ret = -EINVAL;
815                                 goto out_unlock;
816                         }
817                         buffers[i].extres_pages = kcalloc(nr_pages,
818                                         sizeof(struct page *), GFP_KERNEL);
819                         if (!buffers[i].extres_pages) {
820                                 ret = -ENOMEM;
821                                 goto out_unlock;
822                         }
823
824                         ret = get_user_pages_fast(
825                                         alloc->imported.user_buf.address,
826                                         nr_pages, 0,
827                                         buffers[i].extres_pages);
828                         if (ret != nr_pages)
829                                 goto out_unlock;
830                         ret = 0;
831                         break;
832                 }
833                 case KBASE_MEM_TYPE_IMPORTED_UMP:
834                 {
835                         dev_warn(katom->kctx->kbdev->dev,
836                                         "UMP is not supported for debug_copy jobs\n");
837                         ret = -EINVAL;
838                         goto out_unlock;
839                 }
840                 default:
841                         /* Nothing to be done. */
842                         break;
843                 }
844                 kbase_gpu_vm_unlock(katom->kctx);
845         }
846         kfree(user_buffers);
847
848         return ret;
849
850 out_unlock:
851         kbase_gpu_vm_unlock(katom->kctx);
852
853 out_cleanup:
854         kfree(buffers);
855         kfree(user_buffers);
856
857         /* Frees allocated memory for kbase_debug_copy_job struct, including
858          * members, and sets jc to 0 */
859         kbase_debug_copy_finish(katom);
860         return ret;
861 }
862
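/**
 * kbase_mem_copy_from_extres_page - Copy one source page of an external
 *                                   resource into the destination buffer.
 * @kctx:           Context the debug copy job belongs to.
 * @extres_page:    Kernel mapping of the source page.
 * @pages:          Pinned destination pages.
 * @nr_pages:       Number of destination pages.
 * @target_page_nr: Current destination page index, updated on return.
 * @offset:         Offset of the destination buffer within a page.
 * @to_copy:        Remaining bytes to copy, updated on return.
 *
 * Because the destination buffer need not be page aligned, one source page
 * can straddle two destination pages: the first memcpy() fills the rest of
 * the current destination page and the second writes the remainder to the
 * start of the next one.
 */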
863 static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
864                 void *extres_page, struct page **pages, unsigned int nr_pages,
865                 unsigned int *target_page_nr, size_t offset, size_t *to_copy)
866 {
867         void *target_page = kmap(pages[*target_page_nr]);
868         size_t chunk = PAGE_SIZE-offset;
869
870         if (!target_page) {
871                 *target_page_nr += 1;
872                 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
873                 return;
874         }
875
876         chunk = min(chunk, *to_copy);
877
878         memcpy(target_page + offset, extres_page, chunk);
879         *to_copy -= chunk;
880
881         kunmap(pages[*target_page_nr]);
882
883         *target_page_nr += 1;
884         if (*target_page_nr >= nr_pages)
885                 return;
886
887         target_page = kmap(pages[*target_page_nr]);
888         if (!target_page) {
889                 *target_page_nr += 1;
890                 dev_warn(kctx->kbdev->dev, "kmap failed in debug_copy job.");
891                 return;
892         }
893
894         KBASE_DEBUG_ASSERT(target_page);
895
896         chunk = min(offset, *to_copy);
897         memcpy(target_page, extres_page + PAGE_SIZE-offset, chunk);
898         *to_copy -= chunk;
899
900         kunmap(pages[*target_page_nr]);
901 }
902
903 static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
904                 struct kbase_debug_copy_buffer *buf_data)
905 {
906         unsigned int i;
907         unsigned int target_page_nr = 0;
908         struct page **pages = buf_data->pages;
909         u64 offset = buf_data->offset;
910         size_t extres_size = buf_data->nr_extres_pages*PAGE_SIZE;
911         size_t to_copy = min(extres_size, buf_data->size);
912         struct kbase_mem_phy_alloc *gpu_alloc = buf_data->gpu_alloc;
913         int ret = 0;
914
915         KBASE_DEBUG_ASSERT(pages != NULL);
916
917         kbase_gpu_vm_lock(kctx);
918         if (!gpu_alloc) {
919                 ret = -EINVAL;
920                 goto out_unlock;
921         }
922
923         switch (gpu_alloc->type) {
924         case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
925         {
926                 for (i = 0; i < buf_data->nr_extres_pages; i++) {
927                         struct page *pg = buf_data->extres_pages[i];
928                         void *extres_page = kmap(pg);
929
930                         if (extres_page)
931                                 kbase_mem_copy_from_extres_page(kctx,
932                                                 extres_page, pages,
933                                                 buf_data->nr_pages,
934                                                 &target_page_nr,
935                                                 offset, &to_copy);
936
937                         kunmap(pg);
938                         if (target_page_nr >= buf_data->nr_pages)
939                                 break;
940                 }
941                 break;
942         }
944 #ifdef CONFIG_DMA_SHARED_BUFFER
945         case KBASE_MEM_TYPE_IMPORTED_UMM: {
946                 struct dma_buf *dma_buf = gpu_alloc->imported.umm.dma_buf;
947
948                 KBASE_DEBUG_ASSERT(dma_buf != NULL);
949                 KBASE_DEBUG_ASSERT(dma_buf->size ==
950                                    buf_data->nr_extres_pages * PAGE_SIZE);
951
952                 ret = dma_buf_begin_cpu_access(dma_buf,
953 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
954                                 0, buf_data->nr_extres_pages*PAGE_SIZE,
955 #endif
956                                 DMA_FROM_DEVICE);
957                 if (ret)
958                         goto out_unlock;
959
960                 for (i = 0; i < buf_data->nr_extres_pages; i++) {
961
962                         void *extres_page = dma_buf_kmap(dma_buf, i);
963
964                         if (extres_page)
965                                 kbase_mem_copy_from_extres_page(kctx,
966                                                 extres_page, pages,
967                                                 buf_data->nr_pages,
968                                                 &target_page_nr,
969                                                 offset, &to_copy);
970
971                         dma_buf_kunmap(dma_buf, i, extres_page);
972                         if (target_page_nr >= buf_data->nr_pages)
973                                 break;
974                 }
975                 dma_buf_end_cpu_access(dma_buf,
976 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) && !defined(CONFIG_CHROMEOS)
977                                 0, buf_data->nr_extres_pages*PAGE_SIZE,
978 #endif
979                                 DMA_FROM_DEVICE);
980                 break;
981         }
982 #endif
983         default:
984                 ret = -EINVAL;
985         }
986 out_unlock:
987         kbase_gpu_vm_unlock(kctx);
988         return ret;
989
990 }
991
992 static int kbase_debug_copy(struct kbase_jd_atom *katom)
993 {
994         struct kbase_debug_copy_buffer *buffers =
995                         (struct kbase_debug_copy_buffer *)(uintptr_t)katom->jc;
996         unsigned int i;
997
998         for (i = 0; i < katom->nr_extres; i++) {
999                 int res = kbase_mem_copy_from_extres(katom->kctx, &buffers[i]);
1000
1001                 if (res)
1002                         return res;
1003         }
1004
1005         return 0;
1006 }
1007
1008 static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
1009 {
1010         __user void *data = (__user void *)(uintptr_t) katom->jc;
1011         struct base_jit_alloc_info *info;
1012         int ret;
1013
1014         /* Fail the job if there is no info structure */
1015         if (!data) {
1016                 ret = -EINVAL;
1017                 goto fail;
1018         }
1019
1020         /* Copy the information for safe access and future storage */
1021         info = kzalloc(sizeof(*info), GFP_KERNEL);
1022         if (!info) {
1023                 ret = -ENOMEM;
1024                 goto fail;
1025         }
1026
1027         if (copy_from_user(info, data, sizeof(*info)) != 0) {
1028                 ret = -EINVAL;
1029                 goto free_info;
1030         }
1031
1032         /* If the ID is zero then fail the job */
1033         if (info->id == 0) {
1034                 ret = -EINVAL;
1035                 goto free_info;
1036         }
1037
1038         /* Sanity check that the PA fits within the VA */
1039         if (info->va_pages < info->commit_pages) {
1040                 ret = -EINVAL;
1041                 goto free_info;
1042         }
1043
1044         /* Ensure the GPU address is correctly aligned */
1045         if ((info->gpu_alloc_addr & 0x7) != 0) {
1046                 ret = -EINVAL;
1047                 goto free_info;
1048         }
1049
1050         /* Replace the user pointer with our kernel allocated info structure */
1051         katom->jc = (u64)(uintptr_t) info;
1052
1053         /*
1054          * Note:
1055          * The provided info->gpu_alloc_addr isn't validated here as
1056          * userland can cache allocations which means that even
1057          * though the region is valid it doesn't represent the
1058          * same thing it used to.
1059          *
1060          * Complete validation of va_pages, commit_pages and extent
1061          * isn't done here as it will be done during the call to
1062          * kbase_mem_alloc.
1063          */
1064         return 0;
1065
1066 free_info:
1067         kfree(info);
1068 fail:
1069         katom->jc = 0;
1070         return ret;
1071 }
1072
1073 static void kbase_jit_allocate_process(struct kbase_jd_atom *katom)
1074 {
1075         struct kbase_context *kctx = katom->kctx;
1076         struct base_jit_alloc_info *info;
1077         struct kbase_va_region *reg;
1078         struct kbase_vmap_struct mapping;
1079         u64 *ptr;
1080
1081         info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1082
1083         /* The JIT ID is still in use so fail the allocation */
1084         if (kctx->jit_alloc[info->id]) {
1085                 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1086                 return;
1087         }
1088
1089         /*
1090          * Mark the allocation so we know it's in use even if the
1091          * allocation itself fails.
1092          */
1093         kctx->jit_alloc[info->id] = (struct kbase_va_region *) -1;
1094
1095         /* Create a JIT allocation */
1096         reg = kbase_jit_allocate(kctx, info);
1097         if (!reg) {
1098                 katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
1099                 return;
1100         }
1101
1102         /*
1103          * Write the address of the JIT allocation to the user provided
1104          * GPU allocation.
1105          */
1106         ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
1107                         &mapping);
1108         if (!ptr) {
1109                 /*
1110                  * Leave the allocation "live" as the JIT free job will be
1111                  * submitted anyway.
1112                  */
1113                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1114                 return;
1115         }
1116
1117         *ptr = reg->start_pfn << PAGE_SHIFT;
1118         kbase_vunmap(kctx, &mapping);
1119
1120         katom->event_code = BASE_JD_EVENT_DONE;
1121
1122         /*
1123          * Bind it to the user provided ID. Do this last so we can check for
1124          * the JIT free racing this JIT alloc job.
1125          */
1126         kctx->jit_alloc[info->id] = reg;
1127 }
1128
1129 static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
1130 {
1131         struct base_jit_alloc_info *info;
1132
1133         info = (struct base_jit_alloc_info *) (uintptr_t) katom->jc;
1134         /* Free the info structure */
1135         kfree(info);
1136 }
1137
1138 static void kbase_jit_free_process(struct kbase_jd_atom *katom)
1139 {
1140         struct kbase_context *kctx = katom->kctx;
1141         u8 id = (u8) katom->jc;
1142
1143         /*
1144          * If the ID is zero or it is not in use yet then fail the job.
1145          */
1146         if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
1147                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1148                 return;
1149         }
1150
1151         /*
1152          * If the ID is valid but the allocation request failed still succeed
1153          * this soft job but don't try and free the allocation.
1154          */
1155         if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
1156                 kbase_jit_free(kctx, kctx->jit_alloc[id]);
1157
1158         kctx->jit_alloc[id] = NULL;
1159 }
1160
1161 static int kbase_ext_res_prepare(struct kbase_jd_atom *katom)
1162 {
1163         __user struct base_external_resource_list *user_ext_res;
1164         struct base_external_resource_list *ext_res;
1165         u64 count = 0;
1166         size_t copy_size;
1167         int ret;
1168
1169         user_ext_res = (__user struct base_external_resource_list *)
1170                         (uintptr_t) katom->jc;
1171
1172         /* Fail the job if there is no info structure */
1173         if (!user_ext_res) {
1174                 ret = -EINVAL;
1175                 goto fail;
1176         }
1177
1178         if (copy_from_user(&count, &user_ext_res->count, sizeof(u64)) != 0) {
1179                 ret = -EINVAL;
1180                 goto fail;
1181         }
1182
1183         /* Is the number of external resources in range? */
1184         if (!count || count > BASE_EXT_RES_COUNT_MAX) {
1185                 ret = -EINVAL;
1186                 goto fail;
1187         }
1188
1189         /* Copy the information for safe access and future storage */
1190         copy_size = sizeof(*ext_res);
1191         copy_size += sizeof(struct base_external_resource) * (count - 1);
1192         ext_res = kzalloc(copy_size, GFP_KERNEL);
1193         if (!ext_res) {
1194                 ret = -ENOMEM;
1195                 goto fail;
1196         }
1197
1198         if (copy_from_user(ext_res, user_ext_res, copy_size) != 0) {
1199                 ret = -EINVAL;
1200                 goto free_info;
1201         }
1202
1203         /*
1204          * Overwrite the count with the first value in case it was changed
1205          * after the fact.
1206          */
1207         ext_res->count = count;
1208
1209         /*
1210          * Replace the user pointer with our kernel allocated
1211          * ext_res structure.
1212          */
1213         katom->jc = (u64)(uintptr_t) ext_res;
1214
1215         return 0;
1216
1217 free_info:
1218         kfree(ext_res);
1219 fail:
1220         return ret;
1221 }
1222
1223 static void kbase_ext_res_process(struct kbase_jd_atom *katom, bool map)
1224 {
1225         struct base_external_resource_list *ext_res;
1226         int i;
1227         bool failed = false;
1228
1229         ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1230         if (!ext_res)
1231                 goto failed_jc;
1232
1233         kbase_gpu_vm_lock(katom->kctx);
1234
1235         for (i = 0; i < ext_res->count; i++) {
1236                 u64 gpu_addr;
1237
1238                 gpu_addr = ext_res->ext_res[i].ext_resource &
1239                                 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1240                 if (map) {
1241                         if (!kbase_sticky_resource_acquire(katom->kctx,
1242                                         gpu_addr))
1243                                 goto failed_loop;
1244                 } else
1245                         if (!kbase_sticky_resource_release(katom->kctx, NULL,
1246                                         gpu_addr))
1247                                 failed = true;
1248         }
1249
1250         /*
1251          * In the case of unmap we continue unmapping the other resources on
1252          * failure, but will always report failure if _any_ unmap request
1253          * fails.
1254          */
1255         if (failed)
1256                 katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1257         else
1258                 katom->event_code = BASE_JD_EVENT_DONE;
1259
1260         kbase_gpu_vm_unlock(katom->kctx);
1261
1262         return;
1263
1264 failed_loop:
1265         while (i-- > 0) {
1266                 u64 gpu_addr;
1267
1268                 gpu_addr = ext_res->ext_res[i].ext_resource &
1269                                 ~BASE_EXT_RES_ACCESS_EXCLUSIVE;
1270
1271                 kbase_sticky_resource_release(katom->kctx, NULL, gpu_addr);
1272         }
1273
1274         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1275         kbase_gpu_vm_unlock(katom->kctx);
1276
1277 failed_jc:
1278         return;
1279 }
1280
1281 static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
1282 {
1283         struct base_external_resource_list *ext_res;
1284
1285         ext_res = (struct base_external_resource_list *) (uintptr_t) katom->jc;
1286         /* Free the info structure */
1287         kfree(ext_res);
1288 }
1289
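/**
 * kbase_process_soft_job - Execute a soft job atom.
 * @katom: Atom with a BASE_JD_REQ_SOFT_JOB_TYPE request.
 *
 * Dispatches on the soft job type encoded in katom->core_req.
 *
 * Return: 0 if the atom has completed (katom->event_code holds the result);
 * non-zero if it has not completed, either because it is blocked waiting
 * (fence or soft event wait) or because of an error such as -EINVAL.
 */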
1290 int kbase_process_soft_job(struct kbase_jd_atom *katom)
1291 {
1292         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1293         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1294                 return kbase_dump_cpu_gpu_time(katom);
1295 #ifdef CONFIG_SYNC
1296         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1297                 KBASE_DEBUG_ASSERT(katom->fence != NULL);
1298                 katom->event_code = kbase_fence_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1299                 /* Release the reference as we don't need it any more */
1300                 sync_fence_put(katom->fence);
1301                 katom->fence = NULL;
1302                 break;
1303         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1304                 return kbase_fence_wait(katom);
1305 #endif                          /* CONFIG_SYNC */
1306         case BASE_JD_REQ_SOFT_REPLAY:
1307                 return kbase_replay_process(katom);
1308         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1309                 return kbasep_soft_event_wait(katom);
1310         case BASE_JD_REQ_SOFT_EVENT_SET:
1311                 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
1312                 break;
1313         case BASE_JD_REQ_SOFT_EVENT_RESET:
1314                 kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_RESET);
1315                 break;
1316         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1317         {
1318                 int res = kbase_debug_copy(katom);
1319
1320                 if (res)
1321                         katom->event_code = BASE_JD_EVENT_JOB_INVALID;
1322                 break;
1323         }
1324         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1325                 return -EINVAL; /* Temporarily disabled */
1326                 kbase_jit_allocate_process(katom);
1327                 break;
1328         case BASE_JD_REQ_SOFT_JIT_FREE:
1329                 return -EINVAL; /* Temporarily disabled */
1330                 kbase_jit_free_process(katom);
1331                 break;
1332         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1333                 kbase_ext_res_process(katom, true);
1334                 break;
1335         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1336                 kbase_ext_res_process(katom, false);
1337                 break;
1338         }
1339
1340         /* Atom is complete */
1341         return 0;
1342 }
1343
1344 void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
1345 {
1346         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1347 #ifdef CONFIG_SYNC
1348         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1349                 kbase_fence_cancel_wait(katom);
1350                 break;
1351 #endif
1352         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1353                 kbasep_soft_event_cancel_job(katom);
1354                 break;
1355         default:
1356                 /* This soft-job doesn't support cancellation! */
1357                 KBASE_DEBUG_ASSERT(0);
1358         }
1359 }
1360
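/**
 * kbase_prepare_soft_job - Validate and prepare a soft job at submission time.
 * @katom: Atom being submitted.
 *
 * Performs the per-type setup that must happen in the submitting process'
 * context: checking the alignment of jc, copying user structures into kernel
 * memory and taking references on fence objects.
 *
 * Return: 0 on success or a negative error code such as -EINVAL or -ENOMEM.
 */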
1361 int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
1362 {
1363         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1364         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1365                 {
1366                         if (0 != (katom->jc & KBASE_CACHE_ALIGNMENT_MASK))
1367                                 return -EINVAL;
1368                 }
1369                 break;
1370 #ifdef CONFIG_SYNC
1371         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1372                 {
1373                         struct base_fence fence;
1374                         int fd;
1375
1376                         if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1377                                 return -EINVAL;
1378
1379                         fd = kbase_stream_create_fence(fence.basep.stream_fd);
1380                         if (fd < 0)
1381                                 return -EINVAL;
1382
1383                         katom->fence = sync_fence_fdget(fd);
1384
1385                         if (katom->fence == NULL) {
1386                                 /* The only way the fence can be NULL is if userspace closed it for us.
1387                                  * So we don't need to clear it up */
1388                                 return -EINVAL;
1389                         }
1390                         fence.basep.fd = fd;
1391                         if (0 != copy_to_user((__user void *)(uintptr_t) katom->jc, &fence, sizeof(fence))) {
1392                                 katom->fence = NULL;
1393                                 sys_close(fd);
1394                                 return -EINVAL;
1395                         }
1396                 }
1397                 break;
1398         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1399                 {
1400                         struct base_fence fence;
1401
1402                         if (0 != copy_from_user(&fence, (__user void *)(uintptr_t) katom->jc, sizeof(fence)))
1403                                 return -EINVAL;
1404
1405                         /* Get a reference to the fence object */
1406                         katom->fence = sync_fence_fdget(fence.basep.fd);
1407                         if (katom->fence == NULL)
1408                                 return -EINVAL;
1409                 }
1410                 break;
1411 #endif                          /* CONFIG_SYNC */
1412         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1413                 return kbase_jit_allocate_prepare(katom);
1414         case BASE_JD_REQ_SOFT_REPLAY:
1415         case BASE_JD_REQ_SOFT_JIT_FREE:
1416                 break;
1417         case BASE_JD_REQ_SOFT_EVENT_WAIT:
1418         case BASE_JD_REQ_SOFT_EVENT_SET:
1419         case BASE_JD_REQ_SOFT_EVENT_RESET:
1420                 if (katom->jc == 0)
1421                         return -EINVAL;
1422                 break;
1423         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1424                 return kbase_debug_copy_prepare(katom);
1425         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1426                 return kbase_ext_res_prepare(katom);
1427         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1428                 return kbase_ext_res_prepare(katom);
1429         default:
1430                 /* Unsupported soft-job */
1431                 return -EINVAL;
1432         }
1433         return 0;
1434 }
1435
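/**
 * kbase_finish_soft_job - Release the resources held by a completed soft job.
 * @katom: Atom that has finished or been cancelled.
 *
 * Signals and drops any still-pending fence for fence trigger atoms, drops
 * the fence reference for fence wait atoms and frees the kernel copies of
 * the user structures created by kbase_prepare_soft_job().
 */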
1436 void kbase_finish_soft_job(struct kbase_jd_atom *katom)
1437 {
1438         switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
1439         case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
1440                 /* Nothing to do */
1441                 break;
1442 #ifdef CONFIG_SYNC
1443         case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
1444                 /* If fence has not yet been signalled, do it now */
1445                 if (katom->fence) {
1446                         kbase_fence_trigger(katom, katom->event_code ==
1447                                         BASE_JD_EVENT_DONE ? 0 : -EFAULT);
1448                         sync_fence_put(katom->fence);
1449                         katom->fence = NULL;
1450                 }
1451                 break;
1452         case BASE_JD_REQ_SOFT_FENCE_WAIT:
1453                 /* Release the reference to the fence object */
1454                 if (katom->fence) {
1455                         sync_fence_put(katom->fence);
1456                         katom->fence = NULL;
1457                 }
1458                 break;
1459 #endif                          /* CONFIG_SYNC */
1460
1461         case BASE_JD_REQ_SOFT_DEBUG_COPY:
1462                 kbase_debug_copy_finish(katom);
1463                 break;
1464         case BASE_JD_REQ_SOFT_JIT_ALLOC:
1465                 kbase_jit_allocate_finish(katom);
1466                 break;
1467         case BASE_JD_REQ_SOFT_EXT_RES_MAP:
1468                 kbase_ext_res_finish(katom);
1469                 break;
1470         case BASE_JD_REQ_SOFT_EXT_RES_UNMAP:
1471                 kbase_ext_res_finish(katom);
1472                 break;
1473         }
1474 }
1475
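/**
 * kbase_resume_suspended_soft_jobs - Run soft jobs that were queued while
 *                                    the GPU was suspending.
 * @kbdev: Device whose suspended soft job list is drained.
 *
 * Moves the whole suspended_soft_jobs_list out under runpool_mutex, then
 * processes each atom under its context's jctx.lock, finishing those that
 * complete immediately and rescheduling the job scheduler if required.
 */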
1476 void kbase_resume_suspended_soft_jobs(struct kbase_device *kbdev)
1477 {
1478         LIST_HEAD(local_suspended_soft_jobs);
1479         struct kbase_jd_atom *tmp_iter;
1480         struct kbase_jd_atom *katom_iter;
1481         struct kbasep_js_device_data *js_devdata;
1482         bool resched = false;
1483
1484         KBASE_DEBUG_ASSERT(kbdev);
1485
1486         js_devdata = &kbdev->js_data;
1487
1488         /* Move out the entire list */
1489         mutex_lock(&js_devdata->runpool_mutex);
1490         list_splice_init(&js_devdata->suspended_soft_jobs_list,
1491                         &local_suspended_soft_jobs);
1492         mutex_unlock(&js_devdata->runpool_mutex);
1493
1494         /*
1495          * Each atom must be detached from the list and run separately -
1496          * it could be re-added to the old list, but this is unlikely
1497          */
1498         list_for_each_entry_safe(katom_iter, tmp_iter,
1499                         &local_suspended_soft_jobs, dep_item[1]) {
1500                 struct kbase_context *kctx = katom_iter->kctx;
1501
1502                 mutex_lock(&kctx->jctx.lock);
1503
1504                 /* Remove from the global list */
1505                 list_del(&katom_iter->dep_item[1]);
1506                 /* Remove from the context's list of waiting soft jobs */
1507                 kbasep_remove_waiting_soft_job(katom_iter);
1508
1509                 if (kbase_process_soft_job(katom_iter) == 0) {
1510                         kbase_finish_soft_job(katom_iter);
1511                         resched |= jd_done_nolock(katom_iter, NULL);
1512                 } else {
1513                         KBASE_DEBUG_ASSERT((katom_iter->core_req &
1514                                         BASE_JD_REQ_SOFT_JOB_TYPE)
1515                                         != BASE_JD_REQ_SOFT_REPLAY);
1516                 }
1517
1518                 mutex_unlock(&kctx->jctx.lock);
1519         }
1520
1521         if (resched)
1522                 kbase_js_sched_all(kbdev);
1523 }