firefly-linux-kernel-4.4.55.git: drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select next job from a specified run queue with round robin policy.
 * Return NULL if nothing available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;
        struct amd_sched_job *sched_job;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        sched_job = amd_sched_entity_pop_job(entity);
                        if (sched_job) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return sched_job;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                sched_job = amd_sched_entity_pop_job(entity);
                if (sched_job) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return sched_job;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}
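
/*
 * Illustrative note (editorial sketch, not from the driver itself): with
 * entities A, B and C on the run queue and rq->current_entity pointing at B,
 * the first loop above continues the walk at C; if C has no job queued, the
 * second loop wraps around to A and finally retries B before giving up, so
 * every entity gets a chance before the same one is selected twice in a row.
 */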

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(1);

        /* Add the entity to the run queue */
        amd_sched_rq_add_entity(rq, entity);

        return 0;
}
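
/*
 * Example (hypothetical caller, sketch only; "ring" and the queue depth of
 * 32 are illustrative and not taken from this file): a driver embedding an
 * entity in its per-context state would initialize it against one of the
 * scheduler's run queues:
 *
 *      struct amd_sched_entity entity;
 *      int r;
 *
 *      r = amd_sched_entity_init(&ring->sched, &entity,
 *                                &ring->sched.sched_rq, 32);
 *      if (r)
 *              return r;
 */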

/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini, consume existing
         * queued IBs
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}
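
/*
 * Example (hypothetical caller, sketch only; "ring" is illustrative):
 * teardown mirrors the init above:
 *
 *      amd_sched_entity_fini(&ring->sched, &entity);
 *
 * Because of the wait_event() above, this call may block until every job
 * already queued on the entity has been consumed by the scheduler thread.
 */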

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (ACCESS_ONCE(entity->dependency))
                return NULL;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job))) {

                if (entity->dependency->context == entity->fence_context) {
                        /* We can ignore fences from ourself */
                        fence_put(entity->dependency);
                        continue;
                }

                if (fence_add_callback(entity->dependency, &entity->cb,
                                       amd_sched_entity_wakeup))
                        fence_put(entity->dependency);
                else
                        return NULL;
        }

        return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job           The pointer to job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                        sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first)
                amd_sched_wakeup(sched_job->sched);

        return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job           The pointer to job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;
        struct amd_sched_fence *fence = amd_sched_fence_create(
                entity, sched_job->owner);

        if (!fence)
                return -ENOMEM;

        fence_get(&fence->base);
        sched_job->s_fence = fence;

        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
        trace_amd_sched_job(sched_job);
        return 0;
}
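
/*
 * Example (hypothetical caller, sketch only; the job allocation, the "owner"
 * value and the error label are driver specific and not shown here): once
 * the backend payload of a job has been prepared, submission reduces to:
 *
 *      sched_job->s_entity = &entity;
 *      r = amd_sched_entity_push_job(sched_job);
 *      if (r)
 *              goto err_free_job;
 *
 * Note that the wait_event() above may block until the entity's kfifo has
 * room for the new job.
 */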

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run.
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_job *sched_job;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
        if (sched_job == NULL)
                sched_job = amd_sched_rq_select_job(&sched->sched_rq);

        return sched_job;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        unsigned long flags;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                cancel_delayed_work(&s_fence->dwork);
                spin_lock_irqsave(&sched->fence_list_lock, flags);
                list_del_init(&s_fence->list);
                spin_unlock_irqrestore(&sched->fence_list_lock, flags);
        }
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
        struct amd_sched_fence *s_fence =
                container_of(work, struct amd_sched_fence, dwork.work);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        struct amd_sched_fence *entity, *tmp;
        unsigned long flags;

        DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

        /* Clean all pending fences */
        spin_lock_irqsave(&sched->fence_list_lock, flags);
        list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
                DRM_ERROR("  fence no %d\n", entity->base.seqno);
                cancel_delayed_work(&entity->dwork);
                list_del_init(&entity->list);
                fence_put(&entity->base);
        }
        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        spin_lock_init(&sched->fence_list_lock);
        INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
                unsigned long flags;

                wait_event_interruptible(sched->wake_up_worker,
                        kthread_should_stop() ||
                        (sched_job = amd_sched_select_job(sched)));

                if (!sched_job)
                        continue;

                entity = sched_job->s_entity;
                s_fence = sched_job->s_fence;

                if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                        INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
                        schedule_delayed_work(&s_fence->dwork, sched->timeout);
                        spin_lock_irqsave(&sched->fence_list_lock, flags);
                        list_add_tail(&s_fence->list, &sched->fence_list);
                        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
                }

                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Max number of outstanding hw submissions.
 * @timeout             Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it.
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        amd_sched_rq_init(&sched->sched_rq);
        amd_sched_rq_init(&sched->kernel_rq);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        atomic_set(&sched->hw_rq_count, 0);

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}
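
/*
 * Example (hypothetical backend, sketch only; the "my_" names, the
 * hw_submission value of 2 and the 10 second timeout are illustrative): a
 * driver supplies the two backend callbacks used above (->dependency and
 * ->run_job) and creates one scheduler per hardware ring:
 *
 *      static struct fence *my_dependency(struct amd_sched_job *sched_job);
 *      static struct fence *my_run_job(struct amd_sched_job *sched_job);
 *
 *      static struct amd_sched_backend_ops my_ops = {
 *              .dependency = my_dependency,
 *              .run_job    = my_run_job,
 *      };
 *
 *      r = amd_sched_init(&ring->sched, &my_ops, 2,
 *                         msecs_to_jiffies(10000), ring->name);
 *
 * Passing MAX_SCHEDULE_TIMEOUT as the timeout disables the watchdog handled
 * by amd_sched_fence_work_func().
 */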

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
}