/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)


/*** HW sync point threshold interrupt management ***/

static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
{
        thresh &= 0xffff;
        writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}

static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
{
        writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}


/*** Wait list management ***/

struct nvhost_waitlist {
        struct list_head list;
        struct kref refcount;
        u32 thresh;
        enum nvhost_intr_action action;
        atomic_t state;
        void *data;
        int count;
};

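/*
 * Lifecycle of a waiter.  The ordering of the values matters: the interrupt
 * thread advances a waiter with atomic_inc_return(), so WLS_PENDING must
 * precede WLS_REMOVED (waiter taken off the wait queue) and WLS_CANCELLED
 * must precede WLS_HANDLED (cancelled waiter finally discarded).
 */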
enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
                                struct list_head *queue)
{
        struct nvhost_waitlist *pos;
        u32 thresh = waiter->thresh;

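        /*
         * Walk from the tail so the common case of monotonically increasing
         * thresholds stops after one comparison; the signed subtraction keeps
         * the ordering correct across 32-bit sync point wraparound.
         */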
        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;

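        /*
         * The queue is sorted by threshold, so we can stop at the first
         * waiter whose threshold is still ahead of the current sync value.
         */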
        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
                        && !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                        struct nvhost_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else {
                        list_move_tail(&waiter->list, dest);
                }
        }
}

static void action_submit_complete(struct nvhost_waitlist *waiter)
{
        struct nvhost_channel *channel = waiter->data;
        int nr_completed = waiter->count;

        nvhost_cdma_update(&channel->cdma);
        nvhost_module_idle_mult(&channel->mod, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
        struct nvhost_hwctx *hwctx = waiter->data;
        struct nvhost_channel *channel = hwctx->channel;

        channel->ctxhandler.save_service(hwctx);
        channel->ctxhandler.put(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

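/*
 * Dispatch table indexed by enum nvhost_intr_action; keep the entries in
 * the same order as the enum values.
 */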
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_ctxsave,
        action_wakeup,
        action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}


/*** Interrupt service functions ***/

/**
 * Host1x interrupt service function
 * Handles read / write failures
 */
static irqreturn_t host1x_isr(int irq, void *dev_id)
{
        struct nvhost_intr *intr = dev_id;
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        u32 stat;
        u32 ext_stat;
        u32 addr;

        stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
        ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

        if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
                addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
                pr_err("Host read timeout at address %x\n", addr);
        }

        if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
                addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
                pr_err("Host write timeout at address %x\n", addr);
        }

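        /* write the latched status values back to acknowledge what was handled */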
        writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
        writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

        return IRQ_HANDLED;
}

/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 */
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
                                                syncpt[id]);
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

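        /*
         * Mask this sync point's threshold interrupt and clear its pending
         * status, then defer the real work to the threaded handler, which
         * re-arms the interrupt if more waiters remain.
         */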
        writel(BIT(id),
                sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
        writel(BIT(id),
                sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

        return IRQ_WAKE_THREAD;
}


/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
                                                syncpt[id]);
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;

        struct list_head completed[NVHOST_INTR_ACTION_COUNT];
        u32 sync;
        unsigned int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        sync = nvhost_syncpt_update_min(&dev->syncpt, id);

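        /*
         * Collect the completed waiters while holding the sync point lock,
         * then run their handlers after the lock is dropped.
         */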
        spin_lock(&syncpt->lock);

        remove_completed_waiters(&syncpt->wait_head, sync, completed);

        if (!list_empty(&syncpt->wait_head)) {
                u32 thresh = list_first_entry(&syncpt->wait_head,
                                        struct nvhost_waitlist, list)->thresh;

                set_syncpt_threshold(sync_regs, id, thresh);
                enable_syncpt_interrupt(sync_regs, id);
        }

        spin_unlock(&syncpt->lock);

        run_handlers(completed);

        return IRQ_HANDLED;
}

/*
 * lazily request a syncpt's irq
 */
static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        static DEFINE_MUTEX(mutex);
        int err = 0;

        mutex_lock(&mutex);
        if (!syncpt->irq_requested) {
                err = request_threaded_irq(syncpt->irq,
                                        syncpt_thresh_isr, syncpt_thresh_fn,
                                        0, syncpt->thresh_irq_name, syncpt);
                if (!err)
                        syncpt->irq_requested = 1;
        }
        mutex_unlock(&mutex);
        return err;
}


/*** Main API ***/

int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
                        enum nvhost_intr_action action, void *data,
                        void **ref)
{
        struct nvhost_waitlist *waiter;
        struct nvhost_intr_syncpt *syncpt;
        void __iomem *sync_regs;
        int queue_was_empty;
        int err;

        /* create and initialize a new waiter */
        waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter)
                return -ENOMEM;
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
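        /* if the caller wants a cancellation handle, give it its own
         * reference; nvhost_intr_put_ref() drops it */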
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
        syncpt = intr->syncpt + id;
        sync_regs = intr_to_dev(intr)->sync_aperture;

        spin_lock(&syncpt->lock);

        /* lazily request irq for this sync point */
        if (!syncpt->irq_requested) {
                spin_unlock(&syncpt->lock);

                err = request_syncpt_irq(syncpt);
                if (err) {
                        kfree(waiter);
                        return err;
                }

                spin_lock(&syncpt->lock);
        }

        queue_was_empty = list_empty(&syncpt->wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
                /* added at head of list - new threshold value */
                set_syncpt_threshold(sync_regs, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        enable_syncpt_interrupt(sync_regs, id);
        }

        spin_unlock(&syncpt->lock);

        if (ref)
                *ref = waiter;
        return 0;
}

void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
        struct nvhost_waitlist *waiter = ref;

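        /*
         * A waiter in the REMOVED state is currently being processed by
         * run_handlers(); yield until it reaches HANDLED.  Otherwise mark a
         * still-pending waiter as CANCELLED.
         */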
        while (atomic_cmpxchg(&waiter->state,
                                WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
                schedule();

        kref_put(&waiter->refcount, waiter_release);
}


/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        int err;

        err = request_irq(irq_gen, host1x_isr, 0, "host_status", intr);
        if (err)
                goto fail;
        intr->host1x_irq = irq_gen;
        intr->host1x_isr_started = true;

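        /*
         * Each sync point gets its own interrupt line, irq_sync + id; the
         * IRQs themselves are requested lazily on first use, see
         * nvhost_intr_add_action().
         */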
        for (id = 0, syncpt = intr->syncpt;
             id < NV_HOST1X_SYNCPT_NB_PTS;
             ++id, ++syncpt) {
                syncpt->id = id;
                syncpt->irq = irq_sync + id;
                syncpt->irq_requested = 0;
                spin_lock_init(&syncpt->lock);
                INIT_LIST_HEAD(&syncpt->wait_head);
                snprintf(syncpt->thresh_irq_name,
                         sizeof(syncpt->thresh_irq_name),
                         "%s", nvhost_syncpt_name(id));
        }

        return 0;

fail:
        nvhost_intr_deinit(intr);
        return err;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;

        for (id = 0, syncpt = intr->syncpt;
             id < NV_HOST1X_SYNCPT_NB_PTS;
             ++id, ++syncpt) {
                struct nvhost_waitlist *waiter, *next;
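
                /* free any waiters that were cancelled but never handled */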
                list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
                                == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt->wait_head)) {
                        /* output diagnostics */
                        pr_err("%s: id=%d still has waiters\n", __func__, id);
                        BUG();
                }

                if (syncpt->irq_requested)
                        free_irq(syncpt->irq, syncpt);
        }

        if (intr->host1x_isr_started) {
                free_irq(intr->host1x_irq, intr);
                intr->host1x_isr_started = false;
        }
}

void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz)
{
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

        /* write microsecond clock register */
        writel((hz + 1000000 - 1)/1000000, sync_regs + HOST1X_SYNC_USEC_CLK);

        /* disable the ip_busy_timeout. this prevents write drops, etc.
         * there's no real way to recover from a hung client anyway.
         */
        writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

        /* increase the auto-ack timeout to the maximum value. 2d will hang
         * otherwise on ap20.
         */
        writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);

        /* disable interrupts for both cpu's */
        writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0);
        writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1);

        /* masking all of the interrupts actually means "enable" */
        writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

        /* enable HOST_INT_C0MASK */
        writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

        /* enable HINTMASK_EXT */
        writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

        /* enable IP_READ_INT and IP_WRITE_INT */
        writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
}