drm/amdgpu: clean up amd sched wait_ts and wait_signal
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

/* Initialize a given run queue struct */
static void init_rq(struct amd_run_queue *rq)
{
        INIT_LIST_HEAD(&rq->head.list);
        rq->head.belongto_rq = rq;
        mutex_init(&rq->lock);
        atomic_set(&rq->nr_entity, 0);
        rq->current_entity = &rq->head;
}

/* Note: caller must hold the lock or be in an atomic context */
static void rq_remove_entity(struct amd_run_queue *rq,
                             struct amd_sched_entity *entity)
{
        if (rq->current_entity == entity)
                rq->current_entity = list_entry(entity->list.prev,
                                                typeof(*entity), list);
        list_del_init(&entity->list);
        atomic_dec(&rq->nr_entity);
}

static void rq_add_entity(struct amd_run_queue *rq,
                          struct amd_sched_entity *entity)
{
        list_add_tail(&entity->list, &rq->head.list);
        atomic_inc(&rq->nr_entity);
}

/**
 * Select the next entity from a specified run queue with round robin policy.
 * It may return the same entity as the current one if that is the only
 * available one in the queue. Returns NULL if nothing is available.
 */
static struct amd_sched_entity *rq_select_entity(struct amd_run_queue *rq)
{
        struct amd_sched_entity *p = rq->current_entity;
        int i = atomic_read(&rq->nr_entity) + 1; /* real count + dummy head */

        while (i) {
                p = list_entry(p->list.next, typeof(*p), list);
                if (!rq->check_entity_status(p)) {
                        rq->current_entity = p;
                        break;
                }
                i--;
        }
        return i ? p : NULL;
}

static bool context_entity_is_waiting(struct amd_context_entity *entity)
{
        /* TODO: sync obj for multi-ring synchronization */
        return false;
}

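/*
 * Check whether an entity has work ready to run: the dummy list head, an
 * entity with an empty job queue, or one still waiting on other rings is
 * skipped. Returns 0 if the entity can be scheduled, -1 otherwise.
 */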
static int gpu_entity_check_status(struct amd_sched_entity *entity)
{
        struct amd_context_entity *tmp = NULL;

        if (entity == &entity->belongto_rq->head)
                return -1;

        tmp = container_of(entity, typeof(*tmp), generic_entity);
        if (kfifo_is_empty(&tmp->job_queue) ||
            context_entity_is_waiting(tmp))
                return -1;

        return 0;
}

/**
 * Note: This function should only be called inside the scheduler main
 * function for thread safety; there is no other protection here.
 * Returns true if the scheduler has something ready to run.
 *
 * For active_hw_rq, there is only one producer (scheduler thread) and
 * one consumer (ISR). It should be safe to use this function in the
 * scheduler main thread to decide whether to continue emitting more IBs.
 */
static bool is_scheduler_ready(struct amd_gpu_scheduler *sched)
{
        return !kfifo_is_full(&sched->active_hw_rq);
}

/**
 * Select the next entity from the kernel run queue; return NULL if none
 * is available.
 */
static struct amd_context_entity *kernel_rq_select_context(
        struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *sched_entity = NULL;
        struct amd_context_entity *tmp = NULL;
        struct amd_run_queue *rq = &sched->kernel_rq;

        mutex_lock(&rq->lock);
        sched_entity = rq_select_entity(rq);
        if (sched_entity)
                tmp = container_of(sched_entity,
                                   typeof(*tmp),
                                   generic_entity);
        mutex_unlock(&rq->lock);
        return tmp;
}

/**
 * Select the next entity that has real IB submissions queued.
 */
static struct amd_context_entity *select_context(
        struct amd_gpu_scheduler *sched)
{
        struct amd_context_entity *wake_entity = NULL;
        struct amd_context_entity *tmp;
        struct amd_run_queue *rq;

        if (!is_scheduler_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        tmp = kernel_rq_select_context(sched);
        if (tmp != NULL)
                goto exit;

        WARN_ON(offsetof(struct amd_context_entity, generic_entity) != 0);

        rq = &sched->sched_rq;
        mutex_lock(&rq->lock);
        tmp = container_of(rq_select_entity(rq),
                           typeof(*tmp), generic_entity);
        mutex_unlock(&rq->lock);
exit:
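        /*
         * If the scheduler switches away from the entity it ran last, wake
         * that entity's wait_queue so anyone waiting for it to become idle
         * (e.g. amd_context_entity_fini) can make progress.
         */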
        if (sched->current_entity && (sched->current_entity != tmp))
                wake_entity = sched->current_entity;
        sched->current_entity = tmp;
        if (wake_entity)
                wake_up(&wake_entity->wait_queue);
        return tmp;
}

/**
 * Init a context entity used by the scheduler when submitting to the HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_context_entity
 * @parent      The parent entity of this amd_context_entity
 * @rq          The run queue this entity belongs to
 * @context_id  The context id for this entity
 * @jobs        The max number of jobs in the job queue
 *
 * return 0 on success, negative error code on failure
 */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity,
                            struct amd_sched_entity *parent,
                            struct amd_run_queue *rq,
                            uint32_t context_id,
                            uint32_t jobs)
{
        uint64_t seq_ring = 0;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_context_entity));
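        /* The initial virtual sequence number encodes the ring id in its top four bits. */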
        seq_ring = ((uint64_t)sched->ring_id) << 60;
        spin_lock_init(&entity->lock);
        entity->generic_entity.belongto_rq = rq;
        entity->generic_entity.parent = parent;
        entity->scheduler = sched;
        init_waitqueue_head(&entity->wait_queue);
        init_waitqueue_head(&entity->wait_emit);
        if (kfifo_alloc(&entity->job_queue,
                        jobs * sizeof(void *),
                        GFP_KERNEL))
                return -EINVAL;

        spin_lock_init(&entity->queue_lock);
        entity->tgid = (context_id == AMD_KERNEL_CONTEXT_ID) ?
                AMD_KERNEL_PROCESS_ID : current->tgid;
        entity->context_id = context_id;
        atomic64_set(&entity->last_emitted_v_seq, seq_ring);
        atomic64_set(&entity->last_queued_v_seq, seq_ring);

        /* Add the entity to the run queue */
        mutex_lock(&rq->lock);
        rq_add_entity(rq, &entity->generic_entity);
        mutex_unlock(&rq->lock);
        return 0;
}

/**
 * Query if an entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * return true if the entity is initialized, false otherwise
 */
static bool is_context_entity_initialized(struct amd_gpu_scheduler *sched,
                                          struct amd_context_entity *entity)
{
        return entity->scheduler == sched &&
                entity->generic_entity.belongto_rq != NULL;
}

static bool is_context_entity_idle(struct amd_gpu_scheduler *sched,
                                   struct amd_context_entity *entity)
{
        /**
         * Idle means no pending IBs, and the entity is not
         * currently being used.
         */
        barrier();
        if ((sched->current_entity != entity) &&
            kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * return 0 on success, negative error code on failure
 */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
                            struct amd_context_entity *entity)
{
        int r = 0;
        struct amd_run_queue *rq = entity->generic_entity.belongto_rq;

        if (!is_context_entity_initialized(sched, entity))
                return 0;

        /**
         * The client will not queue more IBs during this fini; consume the
         * existing queued IBs.
         */
        r = wait_event_timeout(
                entity->wait_queue,
                is_context_entity_idle(sched, entity),
                msecs_to_jiffies(AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS)
                ) ? 0 : -1;

        if (r) {
                if (entity->is_pending)
                        DRM_INFO("Entity %u is in waiting state during fini, all pending ibs will be canceled.\n",
                                 entity->context_id);
        }

        mutex_lock(&rq->lock);
        rq_remove_entity(rq, &entity->generic_entity);
        mutex_unlock(&rq->lock);
        kfifo_free(&entity->job_queue);
        return r;
}

/**
 * Submit a normal job to the job queue
 *
 * @sched       The pointer to the scheduler
 * @c_entity    The pointer to amd_context_entity
 * @job         The pointer to the job to submit
 *
 * return 0 once the job has been queued. If the queue is full for this
 * client, the call blocks until the scheduler has consumed some of the
 * queued commands.
 */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                       struct amd_context_entity *c_entity,
                       void *job)
{
        while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
                                   &c_entity->queue_lock) != sizeof(void *)) {
                /**
                 * The current context has used up all of its IB slots;
                 * wait here, or we may need to check whether the GPU is hung.
                 */
                schedule();
        }

        wake_up_interruptible(&sched->wait_queue);
        return 0;
}

/**
 * Wait for a virtual sequence number to be emitted.
 *
 * @c_entity    The pointer to a valid context entity
 * @seq         The virtual sequence number to wait for
 * @intr        Interruptible or not
 * @timeout     Timeout in ms, wait infinitely if <0
 *
 * return 0 if signaled, <0 on failure
 */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
                        uint64_t seq,
                        bool intr,
                        long timeout)
{
        atomic64_t *v_seq = &c_entity->last_emitted_v_seq;
        wait_queue_head_t *wait_queue = &c_entity->wait_emit;

        if (intr && (timeout < 0)) {
                wait_event_interruptible(
                        *wait_queue,
                        seq <= atomic64_read(v_seq));
                return 0;
        } else if (intr && (timeout >= 0)) {
                wait_event_interruptible_timeout(
                        *wait_queue,
                        seq <= atomic64_read(v_seq),
                        msecs_to_jiffies(timeout));
                return (seq <= atomic64_read(v_seq)) ?
                        0 : -1;
        } else if (!intr && (timeout < 0)) {
                wait_event(
                        *wait_queue,
                        seq <= atomic64_read(v_seq));
                return 0;
        } else if (!intr && (timeout >= 0)) {
                wait_event_timeout(
                        *wait_queue,
                        seq <= atomic64_read(v_seq),
                        msecs_to_jiffies(timeout));
                return (seq <= atomic64_read(v_seq)) ?
                        0 : -1;
        }
        return 0;
}

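/*
 * Scheduler main loop: wait until a HW submission slot is free and a runnable
 * context is selected, pop one job from that context's queue, prepare it,
 * track it in active_hw_rq and hand it to the backend's run_job hook.
 */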
static int amd_sched_main(void *param)
{
        int r;
        void *job;
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_context_entity *c_entity = NULL;
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;

        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                wait_event_interruptible(sched->wait_queue,
                                         is_scheduler_ready(sched) &&
                                         (c_entity = select_context(sched)));
                r = kfifo_out(&c_entity->job_queue, &job, sizeof(void *));
                if (r != sizeof(void *))
                        continue;
                r = sched->ops->prepare_job(sched, c_entity, job);
                if (!r)
                        WARN_ON(kfifo_in_spinlocked(
                                        &sched->active_hw_rq,
                                        &job,
                                        sizeof(void *),
                                        &sched->queue_lock) != sizeof(void *));
                mutex_lock(&sched->sched_lock);
                sched->ops->run_job(sched, c_entity, job);
                mutex_unlock(&sched->sched_lock);
        }
        return 0;
}

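/* Return the sequence number of the last job handled by the ISR. */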
uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched)
{
        return sched->last_handled_seq;
}

/**
 * ISR to handle EOP interrupts
 *
 * @sched: gpu scheduler
 *
 */
void amd_sched_isr(struct amd_gpu_scheduler *sched)
{
        int r;
        void *job;

        r = kfifo_out_spinlocked(&sched->active_hw_rq,
                                 &job, sizeof(void *),
                                 &sched->queue_lock);

        if (r != sizeof(void *))
                job = NULL;

        sched->ops->process_job(sched, job);
        sched->last_handled_seq++;
        wake_up_interruptible(&sched->wait_queue);
}

/**
 * Create a gpu scheduler
 *
 * @device        The device context for this scheduler
 * @ops           The backend operations for this scheduler.
 * @ring          The scheduler is per ring; this is the ring id.
 * @granularity   The minimum scheduling unit in ms.
 * @preemption    Indicates whether this ring supports preemption, 0 means no.
 * @hw_submission The max number of jobs tracked in flight on the HW ring.
 *
 * return the pointer to the scheduler on success, otherwise return NULL
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
                                           struct amd_sched_backend_ops *ops,
                                           unsigned ring,
                                           unsigned granularity,
                                           unsigned preemption,
                                           unsigned hw_submission)
{
        struct amd_gpu_scheduler *sched;
        char name[20] = "gpu_sched[0]";

        sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
        if (!sched)
                return NULL;

        sched->device = device;
        sched->ops = ops;
        sched->granularity = granularity;
        sched->ring_id = ring;
        sched->preemption = preemption;
        sched->last_handled_seq = 0;

        snprintf(name, sizeof(name), "gpu_sched[%d]", ring);
        mutex_init(&sched->sched_lock);
        spin_lock_init(&sched->queue_lock);
        init_rq(&sched->sched_rq);
        sched->sched_rq.check_entity_status = gpu_entity_check_status;

        init_rq(&sched->kernel_rq);
        sched->kernel_rq.check_entity_status = gpu_entity_check_status;

        init_waitqueue_head(&sched->wait_queue);
        if (kfifo_alloc(&sched->active_hw_rq,
                        hw_submission * sizeof(void *),
                        GFP_KERNEL)) {
                kfree(sched);
                return NULL;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_create(amd_sched_main, sched, name);
        /* kthread_create() returns an ERR_PTR on failure, never NULL */
        if (!IS_ERR(sched->thread)) {
                wake_up_process(sched->thread);
                return sched;
        }

        DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
        kfifo_free(&sched->active_hw_rq);
        kfree(sched);
        return NULL;
}

/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 *
 * return 0 on success, -1 on failure.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
        kthread_stop(sched->thread);
        kfifo_free(&sched->active_hw_rq);
        kfree(sched);
        return 0;
}