/* drivers/gpu/arm/midgard_for_linux/backend/gpu/mali_kbase_irq_linux.c */

/*
 *
 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */

#include <mali_kbase.h>
#include <backend/gpu/mali_kbase_device_internal.h>
#include <backend/gpu/mali_kbase_irq_internal.h>

#include <linux/interrupt.h>

#if !defined(CONFIG_MALI_NO_MALI)

/* GPU IRQ Tags */
#define JOB_IRQ_TAG	0
#define MMU_IRQ_TAG	1
#define GPU_IRQ_TAG	2

static void *kbase_tag(void *ptr, u32 tag)
{
	return (void *)(((uintptr_t) ptr) | tag);
}

static void *kbase_untag(void *ptr)
{
	return (void *)(((uintptr_t) ptr) & ~3);
}
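
/*
 * The two helpers above pack an IRQ tag into the low bits of the
 * kbase_device pointer that is passed to request_irq() as dev_id. This
 * relies on the pointer being at least 4-byte aligned (true for any
 * kmalloc()-backed allocation), which leaves bits 0-1 free for the tag.
 * A minimal sketch of the round trip, assuming an aligned kbdev:
 *
 *	void *cookie = kbase_tag(kbdev, MMU_IRQ_TAG);	 // kbdev | 1
 *	struct kbase_device *dev = kbase_untag(cookie);	 // back to kbdev
 *
 * The same kbdev thus yields a distinct dev_id cookie per IRQ line,
 * which free_irq() needs in order to remove the right shared handler.
 */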
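/*
 * Each of the three top-half handlers below follows the same pattern:
 * take gpu_powered_lock to check that the GPU is powered (a powered-off
 * GPU cannot have raised the line, so the IRQ must belong to another
 * device sharing it), read the relevant IRQ_STATUS register, and return
 * IRQ_NONE when no status bits are set so that the core can attribute
 * shared or spurious interrupts correctly.
 */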
static irqreturn_t kbase_job_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_job_done(kbdev, val);

	return IRQ_HANDLED;
}

KBASE_EXPORT_TEST_API(kbase_job_irq_handler);

static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	/* Count this fault as in flight until it has been fully processed */
	atomic_inc(&kbdev->faults_pending);

	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val) {
		atomic_dec(&kbdev->faults_pending);
		return IRQ_NONE;
	}

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_mmu_interrupt(kbdev, val);

	atomic_dec(&kbdev->faults_pending);

	return IRQ_HANDLED;
}

static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);

#ifdef CONFIG_MALI_DEBUG
	if (!kbdev->pm.backend.driver_ready_for_irqs)
		dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
				__func__, irq, val);
#endif /* CONFIG_MALI_DEBUG */
	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbase_gpu_interrupt(kbdev, val);

	return IRQ_HANDLED;
}

/* Default handlers, indexed by the same tags that kbase_tag() encodes
 * into the dev_id cookie */
static irq_handler_t kbase_handler_table[] = {
	[JOB_IRQ_TAG] = kbase_job_irq_handler,
	[MMU_IRQ_TAG] = kbase_mmu_irq_handler,
	[GPU_IRQ_TAG] = kbase_gpu_irq_handler,
};

#ifdef CONFIG_MALI_DEBUG
#define  JOB_IRQ_HANDLER JOB_IRQ_TAG
#define  MMU_IRQ_HANDLER MMU_IRQ_TAG
#define  GPU_IRQ_HANDLER GPU_IRQ_TAG

/**
 * kbase_set_custom_irq_handler - Set a custom IRQ handler
 * @kbdev: Device for which the handler is to be registered
 * @custom_handler: Handler to be registered
 * @irq_type: Interrupt type
 *
 * Registers the given interrupt handler for the requested interrupt type.
 * If no handler is specified (NULL), the default handler for that
 * interrupt type is registered instead.
 *
 * Return: 0 on success, error code otherwise
 */
int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
					irq_handler_t custom_handler,
					int irq_type)
{
	int result = 0;
	irq_handler_t requested_irq_handler = NULL;

	KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
						(GPU_IRQ_HANDLER >= irq_type));

	/* Release previous handler */
	if (kbdev->irqs[irq_type].irq)
		free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));

	requested_irq_handler = custom_handler ? custom_handler :
						kbase_handler_table[irq_type];

	if (request_irq(kbdev->irqs[irq_type].irq,
			requested_irq_handler,
			kbdev->irqs[irq_type].flags | IRQF_SHARED,
			dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
		result = -EINVAL;
		dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
					kbdev->irqs[irq_type].irq, irq_type);
#ifdef CONFIG_SPARSE_IRQ
		dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
#endif /* CONFIG_SPARSE_IRQ */
	}

	return result;
}

KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);

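/*
 * Usage sketch (hypothetical caller, not part of this file): a test rig
 * can divert the job IRQ to its own handler and later restore the
 * default by passing NULL, e.g.:
 *
 *	err = kbase_set_custom_irq_handler(kbdev, my_test_handler,
 *						JOB_IRQ_HANDLER);
 *	...
 *	err = kbase_set_custom_irq_handler(kbdev, NULL, JOB_IRQ_HANDLER);
 *
 * where my_test_handler is an assumed irq_handler_t supplied by the
 * caller. kbasep_common_test_interrupt() below uses exactly this
 * mechanism.
 */
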
/* Test correct interrupt assignment and reception by the CPU */
struct kbasep_irq_test {
	struct hrtimer timer;
	wait_queue_head_t wait;
	int triggered;
	u32 timeout;
};

static struct kbasep_irq_test kbasep_irq_test_data;

#define IRQ_TEST_TIMEOUT    500 /* ms */

static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}

static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
{
	unsigned long flags;
	struct kbase_device *kbdev = kbase_untag(data);
	u32 val;

	spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!kbdev->pm.backend.gpu_powered) {
		/* GPU is turned off - IRQ is not for us */
		spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
								flags);
		return IRQ_NONE;
	}

	val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);

	spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);

	if (!val)
		return IRQ_NONE;

	dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);

	kbasep_irq_test_data.triggered = 1;
	wake_up(&kbasep_irq_test_data.wait);

	kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);

	return IRQ_HANDLED;
}

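/*
 * Unlike the normal handlers, the two test handlers above acknowledge
 * the interrupt themselves by writing *_IRQ_CLEAR, because the regular
 * bottom-half processing (kbase_job_done()/kbase_mmu_interrupt()) that
 * would otherwise clear it is deliberately bypassed during the test.
 */
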
static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
{
	struct kbasep_irq_test *test_data = container_of(timer,
						struct kbasep_irq_test, timer);

	test_data->timeout = 1;
	test_data->triggered = 1;
	wake_up(&test_data->wait);
	return HRTIMER_NORESTART;
}

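/*
 * kbasep_common_test_interrupt - check that one interrupt line reaches
 * the CPU. The line is masked, the matching test handler is installed,
 * an interrupt is synthesized by writing bit 0 of the RAWSTAT register,
 * and an hrtimer bounds the wait at IRQ_TEST_TIMEOUT ms. The original
 * handler and mask are restored before returning.
 */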
static int kbasep_common_test_interrupt(
				struct kbase_device * const kbdev, u32 tag)
{
	int err = 0;
	irq_handler_t test_handler;

	u32 old_mask_val;
	u16 mask_offset;
	u16 rawstat_offset;

	switch (tag) {
	case JOB_IRQ_TAG:
		test_handler = kbase_job_irq_test_handler;
		rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
		mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
		break;
	case MMU_IRQ_TAG:
		test_handler = kbase_mmu_irq_test_handler;
		rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
		mask_offset = MMU_REG(MMU_IRQ_MASK);
		break;
	case GPU_IRQ_TAG:
		/* already tested by pm_driver - bail out */
	default:
		return 0;
	}

	/* store old mask */
	old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
	/* mask interrupts */
	kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

	if (kbdev->irqs[tag].irq) {
		/* release original handler and install test handler */
		if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
			err = -EINVAL;
		} else {
			kbasep_irq_test_data.timeout = 0;
			hrtimer_init(&kbasep_irq_test_data.timer,
					CLOCK_MONOTONIC, HRTIMER_MODE_REL);
			kbasep_irq_test_data.timer.function =
						kbasep_test_interrupt_timeout;

			/* trigger interrupt */
			kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
			kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);

			hrtimer_start(&kbasep_irq_test_data.timer,
					HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
					HRTIMER_MODE_REL);

			wait_event(kbasep_irq_test_data.wait,
					kbasep_irq_test_data.triggered != 0);

			if (kbasep_irq_test_data.timeout != 0) {
				dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
						kbdev->irqs[tag].irq, tag);
				err = -EINVAL;
			} else {
				dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
						kbdev->irqs[tag].irq, tag);
			}

			hrtimer_cancel(&kbasep_irq_test_data.timer);
			kbasep_irq_test_data.triggered = 0;

			/* mask interrupts */
			kbase_reg_write(kbdev, mask_offset, 0x0, NULL);

			/* release test handler */
			free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
		}

		/* restore original handler */
		if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
				kbdev->irqs[tag].flags | IRQF_SHARED,
				dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
			dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
						kbdev->irqs[tag].irq, tag);
			err = -EINVAL;
		}
	}
	/* restore old mask */
	kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);

	return err;
}

int kbasep_common_test_interrupt_handlers(
					struct kbase_device * const kbdev)
{
	int err;

	init_waitqueue_head(&kbasep_irq_test_data.wait);
	kbasep_irq_test_data.triggered = 0;

	/* A suspend won't happen during startup/insmod */
	kbase_pm_context_active(kbdev);

	err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
	if (err) {
		dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
		goto out;
	}

	err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
	if (err) {
		dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
		goto out;
	}

	dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");

 out:
	kbase_pm_context_idle(kbdev);

	return err;
}
#endif /* CONFIG_MALI_DEBUG */

int kbase_install_interrupts(struct kbase_device *kbdev)
{
	u32 nr = ARRAY_SIZE(kbase_handler_table);
	int err;
	u32 i;

	for (i = 0; i < nr; i++) {
		err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
				kbdev->irqs[i].flags | IRQF_SHARED,
				dev_name(kbdev->dev),
				kbase_tag(kbdev, i));
		if (err) {
			dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
							kbdev->irqs[i].irq, i);
#ifdef CONFIG_SPARSE_IRQ
			dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
#endif /* CONFIG_SPARSE_IRQ */
			goto release;
		}
	}

	return 0;

 release:
	/* unwind: free only the IRQs already requested, in reverse order */
	while (i-- > 0)
		free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));

	return err;
}

void kbase_release_interrupts(struct kbase_device *kbdev)
{
	u32 nr = ARRAY_SIZE(kbase_handler_table);
	u32 i;

	for (i = 0; i < nr; i++) {
		if (kbdev->irqs[i].irq)
			free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
	}
}

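/*
 * kbase_synchronize_irqs - wait for any of our handlers that is
 * currently executing on another CPU to finish (via synchronize_irq()).
 * Typically used before the GPU is powered down, so that no top half is
 * still dereferencing registers that are about to become inaccessible.
 */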
void kbase_synchronize_irqs(struct kbase_device *kbdev)
{
	u32 nr = ARRAY_SIZE(kbase_handler_table);
	u32 i;

	for (i = 0; i < nr; i++) {
		if (kbdev->irqs[i].irq)
			synchronize_irq(kbdev->irqs[i].irq);
	}
}

#endif /* !defined(CONFIG_MALI_NO_MALI) */