3 * (C) COPYRIGHT 2014-2015 ARM Limited. All rights reserved.
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
10 * A copy of the licence is included with the program, and can also be obtained
11 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
12 * Boston, MA 02110-1301, USA.
18 #include <mali_kbase.h>
19 #include <backend/gpu/mali_kbase_device_internal.h>
20 #include <backend/gpu/mali_kbase_irq_internal.h>
22 #include <linux/interrupt.h>
24 #if !defined(CONFIG_MALI_NO_MALI)
/* Encode a small integer tag into the low bits of a pointer.
 * Used so the same kbdev pointer can serve as a distinct dev_id for each
 * of the three IRQ lines (job/mmu/gpu) passed to request_irq()/free_irq().
 * Assumes @ptr is at least 4-byte aligned so the low 2 bits are free —
 * see kbase_untag(), which masks exactly those 2 bits back off.
 */
32 static void *kbase_tag(void *ptr, u32 tag)
34 return (void *)(((uintptr_t) ptr) | tag);
/* Strip the 2-bit tag applied by kbase_tag(), recovering the original
 * (aligned) pointer from an IRQ handler's dev_id argument.
 */
37 static void *kbase_untag(void *ptr)
39 return (void *)(((uintptr_t) ptr) & ~3);
/* ISR for the Job Control interrupt line.
 * @data is a tagged kbase_device pointer (see kbase_tag()). Reads
 * JOB_IRQ_STATUS while holding gpu_powered_lock, then hands the pending
 * status bits to kbase_job_done(). Bails out early when the GPU is
 * powered down, since a shared IRQ may fire for another device.
 * NOTE(review): this extract elides the local declarations (flags/val),
 * the early-return path and the final return — present in the full file.
 */
45 static irqreturn_t kbase_job_irq_handler(int irq, void *data)
48 struct kbase_device *kbdev = kbase_untag(data);
51 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
53 if (!kbdev->pm.backend.gpu_powered) {
54 /* GPU is turned off - IRQ is not for us */
55 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
60 val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
62 #ifdef CONFIG_MALI_DEBUG
/* Debug builds flag interrupts that arrive before init completes. */
63 if (!kbdev->pm.backend.driver_ready_for_irqs)
64 dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
66 #endif /* CONFIG_MALI_DEBUG */
67 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
72 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
/* Dispatch the pending job slot IRQ bits to the job backend. */
74 kbase_job_done(kbdev, val);
79 KBASE_EXPORT_TEST_API(kbase_job_irq_handler);
/* ISR for the MMU interrupt line.
 * Same power-check pattern as the job handler, but additionally brackets
 * the fault handling with kbdev->faults_pending: the counter is raised
 * before reading MMU_IRQ_STATUS and dropped once kbase_mmu_interrupt()
 * returns (or on the early-exit path), so other code can wait for
 * in-flight MMU fault processing to drain.
 */
81 static irqreturn_t kbase_mmu_irq_handler(int irq, void *data)
84 struct kbase_device *kbdev = kbase_untag(data);
87 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
89 if (!kbdev->pm.backend.gpu_powered) {
90 /* GPU is turned off - IRQ is not for us */
91 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
/* Mark a fault as in flight before the status read. */
96 atomic_inc(&kbdev->faults_pending);
98 val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
100 #ifdef CONFIG_MALI_DEBUG
101 if (!kbdev->pm.backend.driver_ready_for_irqs)
102 dev_warn(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
104 #endif /* CONFIG_MALI_DEBUG */
105 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
/* Early-exit path must balance the atomic_inc above. */
108 atomic_dec(&kbdev->faults_pending);
112 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
114 kbase_mmu_interrupt(kbdev, val);
116 atomic_dec(&kbdev->faults_pending);
/* ISR for the GPU (control) interrupt line.
 * Mirrors the job handler: power check under gpu_powered_lock, read
 * GPU_IRQ_STATUS, then dispatch to kbase_gpu_interrupt().
 * Note the "before driver is ready" message here is dev_dbg, not
 * dev_warn as in the other handlers — presumably because GPU IRQs can
 * legitimately fire during early power-up; confirm before "fixing".
 */
121 static irqreturn_t kbase_gpu_irq_handler(int irq, void *data)
124 struct kbase_device *kbdev = kbase_untag(data);
127 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
129 if (!kbdev->pm.backend.gpu_powered) {
130 /* GPU is turned off - IRQ is not for us */
131 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
136 val = kbase_reg_read(kbdev, GPU_CONTROL_REG(GPU_IRQ_STATUS), NULL);
138 #ifdef CONFIG_MALI_DEBUG
139 if (!kbdev->pm.backend.driver_ready_for_irqs)
140 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x before driver is ready\n",
142 #endif /* CONFIG_MALI_DEBUG */
143 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
148 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
150 kbase_gpu_interrupt(kbdev, val);
/* Default ISR for each IRQ line, indexed by the *_IRQ_TAG values that
 * kbase_tag() folds into dev_id. kbase_install_interrupts() and the
 * restore path of the interrupt self-test both index into this table.
 */
154 static irq_handler_t kbase_handler_table[] = {
155 [JOB_IRQ_TAG] = kbase_job_irq_handler,
156 [MMU_IRQ_TAG] = kbase_mmu_irq_handler,
157 [GPU_IRQ_TAG] = kbase_gpu_irq_handler,
161 #ifdef CONFIG_MALI_DEBUG
162 #define JOB_IRQ_HANDLER JOB_IRQ_TAG
163 #define MMU_IRQ_HANDLER MMU_IRQ_TAG
164 #define GPU_IRQ_HANDLER GPU_IRQ_TAG
167 * kbase_set_custom_irq_handler - Set a custom IRQ handler
168 * @kbdev: Device for which the handler is to be registered
169 * @custom_handler: Handler to be registered
170 * @irq_type: Interrupt type
172 * Registers given interrupt handler for requested interrupt type
173 * In the case where irq handler is not specified, the default handler shall be
176 * Return: 0 case success, error code otherwise
/* Debug-only hook (see kerneldoc above): swap the ISR for one IRQ line.
 * Frees any currently-installed handler first, then requests the IRQ
 * with either the caller's handler or the default from
 * kbase_handler_table[] when @custom_handler is NULL. The dev_id is the
 * tagged kbdev pointer, so it matches what free_irq() is given elsewhere.
 */
178 int kbase_set_custom_irq_handler(struct kbase_device *kbdev,
179 irq_handler_t custom_handler,
183 irq_handler_t requested_irq_handler = NULL;
185 KBASE_DEBUG_ASSERT((JOB_IRQ_HANDLER <= irq_type) &&
186 (GPU_IRQ_HANDLER >= irq_type));
188 /* Release previous handler */
189 if (kbdev->irqs[irq_type].irq)
190 free_irq(kbdev->irqs[irq_type].irq, kbase_tag(kbdev, irq_type));
/* Fall back to the table default when no custom handler is supplied. */
192 requested_irq_handler = (NULL != custom_handler) ? custom_handler :
193 kbase_handler_table[irq_type];
195 if (0 != request_irq(kbdev->irqs[irq_type].irq,
196 requested_irq_handler,
197 kbdev->irqs[irq_type].flags | IRQF_SHARED,
198 dev_name(kbdev->dev), kbase_tag(kbdev, irq_type))) {
200 dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
201 kbdev->irqs[irq_type].irq, irq_type);
202 #ifdef CONFIG_SPARSE_IRQ
203 dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
204 #endif /* CONFIG_SPARSE_IRQ */
210 KBASE_EXPORT_TEST_API(kbase_set_custom_irq_handler);
212 /* test correct interrupt assigment and reception by cpu */
/* State shared between the interrupt self-test driver
 * (kbasep_common_test_interrupt) and the test ISRs: a timeout hrtimer
 * plus a wait queue the test sleeps on until either the IRQ fires or
 * the timer expires. Single static instance — the self-test is run
 * once at init, not concurrently.
 */
213 struct kbasep_irq_test {
214 struct hrtimer timer;
215 wait_queue_head_t wait;
220 static struct kbasep_irq_test kbasep_irq_test_data;
/* How long (in ms) to wait for the self-raised IRQ to reach the CPU. */
222 #define IRQ_TEST_TIMEOUT 500
/* Self-test ISR temporarily installed on the job IRQ line.
 * Same power-gating pattern as the real handler, but instead of
 * dispatching work it records that the IRQ arrived (triggered = 1),
 * wakes the waiting test thread, and clears the raw status so the
 * line deasserts.
 */
224 static irqreturn_t kbase_job_irq_test_handler(int irq, void *data)
227 struct kbase_device *kbdev = kbase_untag(data);
230 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
232 if (!kbdev->pm.backend.gpu_powered) {
233 /* GPU is turned off - IRQ is not for us */
234 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
239 val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS), NULL);
241 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
246 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
/* Signal the sleeping self-test that the interrupt reached the CPU. */
248 kbasep_irq_test_data.triggered = 1;
249 wake_up(&kbasep_irq_test_data.wait);
/* Ack the interrupt so it does not refire. */
251 kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val, NULL);
/* Self-test ISR for the MMU IRQ line — identical in structure to
 * kbase_job_irq_test_handler() but reading/clearing the MMU IRQ
 * registers instead.
 */
256 static irqreturn_t kbase_mmu_irq_test_handler(int irq, void *data)
259 struct kbase_device *kbdev = kbase_untag(data);
262 spin_lock_irqsave(&kbdev->pm.backend.gpu_powered_lock, flags);
264 if (!kbdev->pm.backend.gpu_powered) {
265 /* GPU is turned off - IRQ is not for us */
266 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock,
271 val = kbase_reg_read(kbdev, MMU_REG(MMU_IRQ_STATUS), NULL);
273 spin_unlock_irqrestore(&kbdev->pm.backend.gpu_powered_lock, flags);
278 dev_dbg(kbdev->dev, "%s: irq %d irqstatus 0x%x\n", __func__, irq, val);
/* Signal the sleeping self-test, then ack the interrupt. */
280 kbasep_irq_test_data.triggered = 1;
281 wake_up(&kbasep_irq_test_data.wait);
283 kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), val, NULL);
/* hrtimer callback: the self-raised IRQ never arrived within
 * IRQ_TEST_TIMEOUT ms. Sets both timeout (so the test reports failure)
 * and triggered (so wait_event() in the test thread unblocks), then
 * wakes the waiter. One-shot: never restarts the timer.
 */
288 static enum hrtimer_restart kbasep_test_interrupt_timeout(struct hrtimer *timer)
290 struct kbasep_irq_test *test_data = container_of(timer,
291 struct kbasep_irq_test, timer);
293 test_data->timeout = 1;
294 test_data->triggered = 1;
295 wake_up(&test_data->wait);
296 return HRTIMER_NORESTART;
/* Verify that one IRQ line (selected by @tag) is wired correctly:
 * install a test ISR, self-raise the interrupt by writing the RAWSTAT
 * register, and wait (with an hrtimer timeout) for the ISR to confirm
 * delivery. Always restores the original handler and mask afterwards.
 * NOTE(review): the switch statement selecting on @tag and several
 * returns/braces are elided from this extract.
 */
299 static int kbasep_common_test_interrupt(
300 struct kbase_device * const kbdev, u32 tag)
303 irq_handler_t test_handler;
/* Per-line register offsets and test ISR: job... */
311 test_handler = kbase_job_irq_test_handler;
312 rawstat_offset = JOB_CONTROL_REG(JOB_IRQ_RAWSTAT);
313 mask_offset = JOB_CONTROL_REG(JOB_IRQ_MASK);
/* ...or mmu. (The GPU line is not tested here, see below.) */
316 test_handler = kbase_mmu_irq_test_handler;
317 rawstat_offset = MMU_REG(MMU_IRQ_RAWSTAT);
318 mask_offset = MMU_REG(MMU_IRQ_MASK);
321 /* already tested by pm_driver - bail out */
/* Save the current mask so it can be restored at the end. */
327 old_mask_val = kbase_reg_read(kbdev, mask_offset, NULL);
328 /* mask interrupts */
329 kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
331 if (kbdev->irqs[tag].irq) {
332 /* release original handler and install test handler */
333 if (kbase_set_custom_irq_handler(kbdev, test_handler, tag) != 0) {
/* Arm the failure path before triggering anything. */
336 kbasep_irq_test_data.timeout = 0;
337 hrtimer_init(&kbasep_irq_test_data.timer,
338 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
339 kbasep_irq_test_data.timer.function =
340 kbasep_test_interrupt_timeout;
342 /* trigger interrupt */
343 kbase_reg_write(kbdev, mask_offset, 0x1, NULL);
344 kbase_reg_write(kbdev, rawstat_offset, 0x1, NULL);
346 hrtimer_start(&kbasep_irq_test_data.timer,
347 HR_TIMER_DELAY_MSEC(IRQ_TEST_TIMEOUT),
/* Sleep until either the test ISR or the timeout callback fires. */
350 wait_event(kbasep_irq_test_data.wait,
351 kbasep_irq_test_data.triggered != 0);
353 if (kbasep_irq_test_data.timeout != 0) {
354 dev_err(kbdev->dev, "Interrupt %d (index %d) didn't reach CPU.\n",
355 kbdev->irqs[tag].irq, tag);
358 dev_dbg(kbdev->dev, "Interrupt %d (index %d) reached CPU.\n",
359 kbdev->irqs[tag].irq, tag);
/* Cancel in case the ISR won the race; reset for the next line's test. */
362 hrtimer_cancel(&kbasep_irq_test_data.timer);
363 kbasep_irq_test_data.triggered = 0;
365 /* mask interrupts */
366 kbase_reg_write(kbdev, mask_offset, 0x0, NULL);
368 /* release test handler */
369 free_irq(kbdev->irqs[tag].irq, kbase_tag(kbdev, tag));
372 /* restore original interrupt */
373 if (request_irq(kbdev->irqs[tag].irq, kbase_handler_table[tag],
374 kbdev->irqs[tag].flags | IRQF_SHARED,
375 dev_name(kbdev->dev), kbase_tag(kbdev, tag))) {
376 dev_err(kbdev->dev, "Can't restore original interrupt %d (index %d)\n",
377 kbdev->irqs[tag].irq, tag);
381 /* restore old mask */
382 kbase_reg_write(kbdev, mask_offset, old_mask_val, NULL);
/* Run the interrupt-delivery self-test on the JOB and MMU lines in turn.
 * Holds a PM active reference across the test so the GPU stays powered
 * (the test handlers bail out if gpu_powered is false).
 * Returns 0 on success, an error code from the first failing line
 * otherwise.
 */
387 int kbasep_common_test_interrupt_handlers(
388 struct kbase_device * const kbdev)
/* One-time init of the shared test state used by the test ISRs. */
392 init_waitqueue_head(&kbasep_irq_test_data.wait);
393 kbasep_irq_test_data.triggered = 0;
395 /* A suspend won't happen during startup/insmod */
396 kbase_pm_context_active(kbdev);
398 err = kbasep_common_test_interrupt(kbdev, JOB_IRQ_TAG);
400 dev_err(kbdev->dev, "Interrupt JOB_IRQ didn't reach CPU. Check interrupt assignments.\n");
404 err = kbasep_common_test_interrupt(kbdev, MMU_IRQ_TAG);
406 dev_err(kbdev->dev, "Interrupt MMU_IRQ didn't reach CPU. Check interrupt assignments.\n");
410 dev_dbg(kbdev->dev, "Interrupts are correctly assigned.\n");
/* Drop the PM reference taken above. */
413 kbase_pm_context_idle(kbdev);
417 #endif /* CONFIG_MALI_DEBUG */
/* Request all three GPU IRQ lines with their default handlers from
 * kbase_handler_table[], tagging each dev_id with the line index so the
 * same kbdev can back multiple shared IRQs. On failure, previously
 * requested lines are released (unwind loop at the bottom, partially
 * elided here). Returns 0 on success.
 */
419 int kbase_install_interrupts(struct kbase_device *kbdev)
421 u32 nr = ARRAY_SIZE(kbase_handler_table);
425 for (i = 0; i < nr; i++) {
426 err = request_irq(kbdev->irqs[i].irq, kbase_handler_table[i],
427 kbdev->irqs[i].flags | IRQF_SHARED,
428 dev_name(kbdev->dev),
429 kbase_tag(kbdev, i));
431 dev_err(kbdev->dev, "Can't request interrupt %d (index %d)\n",
432 kbdev->irqs[i].irq, i);
433 #ifdef CONFIG_SPARSE_IRQ
434 dev_err(kbdev->dev, "You have CONFIG_SPARSE_IRQ support enabled - is the interrupt number correct for this configuration?\n");
435 #endif /* CONFIG_SPARSE_IRQ */
/* Error unwind: free the IRQs requested so far. */
444 free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
/* Release every IRQ line requested by kbase_install_interrupts().
 * The dev_id passed to free_irq() must match the tagged pointer used at
 * request time, hence the kbase_tag(kbdev, i).
 */
449 void kbase_release_interrupts(struct kbase_device *kbdev)
451 u32 nr = ARRAY_SIZE(kbase_handler_table);
454 for (i = 0; i < nr; i++) {
455 if (kbdev->irqs[i].irq)
456 free_irq(kbdev->irqs[i].irq, kbase_tag(kbdev, i));
/* Wait for any in-flight execution of the driver's ISRs to finish on
 * all three lines (synchronize_irq() blocks until pending handlers for
 * that irq have completed). Used before tearing down state the ISRs
 * touch.
 */
460 void kbase_synchronize_irqs(struct kbase_device *kbdev)
462 u32 nr = ARRAY_SIZE(kbase_handler_table);
465 for (i = 0; i < nr; i++) {
466 if (kbdev->irqs[i].irq)
467 synchronize_irq(kbdev->irqs[i].irq);
471 #endif /* !defined(CONFIG_MALI_NO_MALI) */