1 /*
2  * linux/arch/arm/plat-omap/dma.c
3  *
4  * Copyright (C) 2003 - 2008 Nokia Corporation
5  * Author: Juha Yrjölä <juha.yrjola@nokia.com>
6  * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
7  * Graphics DMA and LCD DMA graphics transformations
8  * by Imre Deak <imre.deak@nokia.com>
9  * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc.
10  * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
11  * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
12  *
13  * Copyright (C) 2009 Texas Instruments
14  * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
15  *
16  * Support functions for the OMAP internal DMA channels.
17  *
18  * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
19  * Converted DMA library into DMA platform driver.
20  *      - G, Manjunath Kondaiah <manjugk@ti.com>
21  *
22  * This program is free software; you can redistribute it and/or modify
23  * it under the terms of the GNU General Public License version 2 as
24  * published by the Free Software Foundation.
25  *
26  */
27
28 #include <linux/module.h>
29 #include <linux/init.h>
30 #include <linux/sched.h>
31 #include <linux/spinlock.h>
32 #include <linux/errno.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/io.h>
36 #include <linux/slab.h>
37 #include <linux/delay.h>
38
39 #include <mach/hardware.h>
40 #include <plat/dma.h>
41
42 #include <plat/tc.h>
43
44 /*
45  * MAX_LOGICAL_DMA_CH_COUNT: the maximum number of logical DMA
46  * channels that an instance of the SDMA IP block can support.  Used
47  * to size arrays.  (The actual maximum on a particular SoC may be less
48  * than this -- for example, OMAP1 SDMA instances only support 17 logical
49  * DMA channels.)
50  */
51 #define MAX_LOGICAL_DMA_CH_COUNT                32
52
53 #undef DEBUG
54
55 #ifndef CONFIG_ARCH_OMAP1
56 enum { DMA_CH_ALLOC_DONE, DMA_CH_PARAMS_SET_DONE, DMA_CH_STARTED,
57         DMA_CH_QUEUED, DMA_CH_NOTSTARTED, DMA_CH_PAUSED, DMA_CH_LINK_ENABLED
58 };
59
60 enum { DMA_CHAIN_STARTED, DMA_CHAIN_NOTSTARTED };
61 #endif
62
63 #define OMAP_DMA_ACTIVE                 0x01
64 #define OMAP2_DMA_CSR_CLEAR_MASK        0xffffffff
65
66 #define OMAP_FUNC_MUX_ARM_BASE          (0xfffe1000 + 0xec)
67
68 static struct omap_system_dma_plat_info *p;
69 static struct omap_dma_dev_attr *d;
70
71 static int enable_1510_mode;
72 static u32 errata;
73
74 static struct omap_dma_global_context_registers {
75         u32 dma_irqenable_l0;
76         u32 dma_ocp_sysconfig;
77         u32 dma_gcr;
78 } omap_dma_global_context;
79
80 struct dma_link_info {
81         int *linked_dmach_q;
82         int no_of_lchs_linked;
83
84         int q_count;
85         int q_tail;
86         int q_head;
87
88         int chain_state;
89         int chain_mode;
90
91 };
92
93 static struct dma_link_info *dma_linked_lch;
94
95 #ifndef CONFIG_ARCH_OMAP1
96
97 /* Chain handling macros */
98 #define OMAP_DMA_CHAIN_QINIT(chain_id)                                  \
99         do {                                                            \
100                 dma_linked_lch[chain_id].q_head =                       \
101                 dma_linked_lch[chain_id].q_tail =                       \
102                 dma_linked_lch[chain_id].q_count = 0;                   \
103         } while (0)
104 #define OMAP_DMA_CHAIN_QFULL(chain_id)                                  \
105                 (dma_linked_lch[chain_id].no_of_lchs_linked ==          \
106                 dma_linked_lch[chain_id].q_count)
107 #define OMAP_DMA_CHAIN_QLAST(chain_id)                                  \
108                 ((dma_linked_lch[chain_id].no_of_lchs_linked - 1) ==    \
109                 dma_linked_lch[chain_id].q_count)
112 #define OMAP_DMA_CHAIN_QEMPTY(chain_id)                                 \
113                 (0 == dma_linked_lch[chain_id].q_count)
114 #define __OMAP_DMA_CHAIN_INCQ(end)                                      \
115         ((end) = ((end)+1) % dma_linked_lch[chain_id].no_of_lchs_linked)
116 #define OMAP_DMA_CHAIN_INCQHEAD(chain_id)                               \
117         do {                                                            \
118                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_head); \
119                 dma_linked_lch[chain_id].q_count--;                     \
120         } while (0)
121
122 #define OMAP_DMA_CHAIN_INCQTAIL(chain_id)                               \
123         do {                                                            \
124                 __OMAP_DMA_CHAIN_INCQ(dma_linked_lch[chain_id].q_tail); \
125                 dma_linked_lch[chain_id].q_count++; \
126         } while (0)
127 #endif
128
129 static int dma_lch_count;
130 static int dma_chan_count;
131 static int omap_dma_reserve_channels;
132
133 static spinlock_t dma_chan_lock;
134 static struct omap_dma_lch *dma_chan;
135
136 static inline void disable_lnk(int lch);
137 static void omap_disable_channel_irq(int lch);
138 static inline void omap_enable_channel_irq(int lch);
139
140 #define REVISIT_24XX()          printk(KERN_ERR "FIXME: no %s on 24xx\n", \
141                                                 __func__)
142
143 #ifdef CONFIG_ARCH_OMAP15XX
144 /* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
145 static int omap_dma_in_1510_mode(void)
146 {
147         return enable_1510_mode;
148 }
149 #else
150 #define omap_dma_in_1510_mode()         0
151 #endif
152
153 #ifdef CONFIG_ARCH_OMAP1
154 static inline int get_gdma_dev(int req)
155 {
156         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
157         int shift = ((req - 1) % 5) * 6;
158
159         return ((omap_readl(reg) >> shift) & 0x3f) + 1;
160 }
161
162 static inline void set_gdma_dev(int req, int dev)
163 {
164         u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
165         int shift = ((req - 1) % 5) * 6;
166         u32 l;
167
168         l = omap_readl(reg);
169         l &= ~(0x3f << shift);
170         l |= (dev - 1) << shift;
171         omap_writel(l, reg);
172 }
173 #else
174 #define set_gdma_dev(req, dev)  do {} while (0)
175 #define omap_readl(reg)         0
176 #define omap_writel(val, reg)   do {} while (0)
177 #endif
178
179 void omap_set_dma_priority(int lch, int dst_port, int priority)
180 {
181         unsigned long reg;
182         u32 l;
183
184         if (cpu_class_is_omap1()) {
185                 switch (dst_port) {
186                 case OMAP_DMA_PORT_OCP_T1:      /* FFFECC00 */
187                         reg = OMAP_TC_OCPT1_PRIOR;
188                         break;
189                 case OMAP_DMA_PORT_OCP_T2:      /* FFFECCD0 */
190                         reg = OMAP_TC_OCPT2_PRIOR;
191                         break;
192                 case OMAP_DMA_PORT_EMIFF:       /* FFFECC08 */
193                         reg = OMAP_TC_EMIFF_PRIOR;
194                         break;
195                 case OMAP_DMA_PORT_EMIFS:       /* FFFECC04 */
196                         reg = OMAP_TC_EMIFS_PRIOR;
197                         break;
198                 default:
199                         BUG();
200                         return;
201                 }
202                 l = omap_readl(reg);
203                 l &= ~(0xf << 8);
204                 l |= (priority & 0xf) << 8;
205                 omap_writel(l, reg);
206         }
207
208         if (cpu_class_is_omap2()) {
209                 u32 ccr;
210
211                 ccr = p->dma_read(CCR, lch);
212                 if (priority)
213                         ccr |= (1 << 6);
214                 else
215                         ccr &= ~(1 << 6);
216                 p->dma_write(ccr, CCR, lch);
217         }
218 }
219 EXPORT_SYMBOL(omap_set_dma_priority);
220
221 void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
222                                   int frame_count, int sync_mode,
223                                   int dma_trigger, int src_or_dst_synch)
224 {
225         u32 l;
226
227         l = p->dma_read(CSDP, lch);
228         l &= ~0x03;
229         l |= data_type;
230         p->dma_write(l, CSDP, lch);
231
232         if (cpu_class_is_omap1()) {
233                 u16 ccr;
234
235                 ccr = p->dma_read(CCR, lch);
236                 ccr &= ~(1 << 5);
237                 if (sync_mode == OMAP_DMA_SYNC_FRAME)
238                         ccr |= 1 << 5;
239                 p->dma_write(ccr, CCR, lch);
240
241                 ccr = p->dma_read(CCR2, lch);
242                 ccr &= ~(1 << 2);
243                 if (sync_mode == OMAP_DMA_SYNC_BLOCK)
244                         ccr |= 1 << 2;
245                 p->dma_write(ccr, CCR2, lch);
246         }
247
248         if (cpu_class_is_omap2() && dma_trigger) {
249                 u32 val;
250
251                 val = p->dma_read(CCR, lch);
252
253                 /* DMA_SYNCHRO_CONTROL_UPPER depends on the channel number */
254                 val &= ~((1 << 23) | (3 << 19) | 0x1f);
255                 val |= (dma_trigger & ~0x1f) << 14;
256                 val |= dma_trigger & 0x1f;
257
258                 if (sync_mode & OMAP_DMA_SYNC_FRAME)
259                         val |= 1 << 5;
260                 else
261                         val &= ~(1 << 5);
262
263                 if (sync_mode & OMAP_DMA_SYNC_BLOCK)
264                         val |= 1 << 18;
265                 else
266                         val &= ~(1 << 18);
267
268                 if (src_or_dst_synch == OMAP_DMA_DST_SYNC_PREFETCH) {
269                         val &= ~(1 << 24);      /* dest synch */
270                         val |= (1 << 23);       /* Prefetch */
271                 } else if (src_or_dst_synch) {
272                         val |= 1 << 24;         /* source synch */
273                 } else {
274                         val &= ~(1 << 24);      /* dest synch */
275                 }
276                 p->dma_write(val, CCR, lch);
277         }
278
279         p->dma_write(elem_count, CEN, lch);
280         p->dma_write(frame_count, CFN, lch);
281 }
282 EXPORT_SYMBOL(omap_set_dma_transfer_params);
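
/*
 * Illustrative sketch (not part of the original driver): a client that
 * wants frame-synchronized 32-bit transfers of 64 elements x 16 frames,
 * triggered by a hypothetical request line MY_DMA_REQ, might call the
 * function as below.  OMAP_DMA_DATA_TYPE_S32 and OMAP_DMA_SYNC_FRAME are
 * assumed to come from <plat/dma.h>; passing 0 as src_or_dst_synch
 * selects destination synchronization, as the CCR handling above shows.
 *
 *	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
 *				     64, 16, OMAP_DMA_SYNC_FRAME,
 *				     MY_DMA_REQ, 0);
 */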
283
284 void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
285 {
286         BUG_ON(omap_dma_in_1510_mode());
287
288         if (cpu_class_is_omap1()) {
289                 u16 w;
290
291                 w = p->dma_read(CCR2, lch);
292                 w &= ~0x03;
293
294                 switch (mode) {
295                 case OMAP_DMA_CONSTANT_FILL:
296                         w |= 0x01;
297                         break;
298                 case OMAP_DMA_TRANSPARENT_COPY:
299                         w |= 0x02;
300                         break;
301                 case OMAP_DMA_COLOR_DIS:
302                         break;
303                 default:
304                         BUG();
305                 }
306                 p->dma_write(w, CCR2, lch);
307
308                 w = p->dma_read(LCH_CTRL, lch);
309                 w &= ~0x0f;
310                 /* Default is channel type 2D */
311                 if (mode) {
312                         p->dma_write(color, COLOR, lch);
313                         w |= 1;         /* Channel type G */
314                 }
315                 p->dma_write(w, LCH_CTRL, lch);
316         }
317
318         if (cpu_class_is_omap2()) {
319                 u32 val;
320
321                 val = p->dma_read(CCR, lch);
322                 val &= ~((1 << 17) | (1 << 16));
323
324                 switch (mode) {
325                 case OMAP_DMA_CONSTANT_FILL:
326                         val |= 1 << 16;
327                         break;
328                 case OMAP_DMA_TRANSPARENT_COPY:
329                         val |= 1 << 17;
330                         break;
331                 case OMAP_DMA_COLOR_DIS:
332                         break;
333                 default:
334                         BUG();
335                 }
336                 p->dma_write(val, CCR, lch);
337
338                 color &= 0xffffff;
339                 p->dma_write(color, COLOR, lch);
340         }
341 }
342 EXPORT_SYMBOL(omap_set_dma_color_mode);
343
344 void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode)
345 {
346         if (cpu_class_is_omap2()) {
347                 u32 csdp;
348
349                 csdp = p->dma_read(CSDP, lch);
350                 csdp &= ~(0x3 << 16);
351                 csdp |= (mode << 16);
352                 p->dma_write(csdp, CSDP, lch);
353         }
354 }
355 EXPORT_SYMBOL(omap_set_dma_write_mode);
356
357 void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode)
358 {
359         if (cpu_class_is_omap1() && !cpu_is_omap15xx()) {
360                 u32 l;
361
362                 l = p->dma_read(LCH_CTRL, lch);
363                 l &= ~0x7;
364                 l |= mode;
365                 p->dma_write(l, LCH_CTRL, lch);
366         }
367 }
368 EXPORT_SYMBOL(omap_set_dma_channel_mode);
369
370 /* Note that src_port is only for omap1 */
371 void omap_set_dma_src_params(int lch, int src_port, int src_amode,
372                              unsigned long src_start,
373                              int src_ei, int src_fi)
374 {
375         u32 l;
376
377         if (cpu_class_is_omap1()) {
378                 u16 w;
379
380                 w = p->dma_read(CSDP, lch);
381                 w &= ~(0x1f << 2);
382                 w |= src_port << 2;
383                 p->dma_write(w, CSDP, lch);
384         }
385
386         l = p->dma_read(CCR, lch);
387         l &= ~(0x03 << 12);
388         l |= src_amode << 12;
389         p->dma_write(l, CCR, lch);
390
391         p->dma_write(src_start, CSSA, lch);
392
393         p->dma_write(src_ei, CSEI, lch);
394         p->dma_write(src_fi, CSFI, lch);
395 }
396 EXPORT_SYMBOL(omap_set_dma_src_params);
397
398 void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
399 {
400         omap_set_dma_transfer_params(lch, params->data_type,
401                                      params->elem_count, params->frame_count,
402                                      params->sync_mode, params->trigger,
403                                      params->src_or_dst_synch);
404         omap_set_dma_src_params(lch, params->src_port,
405                                 params->src_amode, params->src_start,
406                                 params->src_ei, params->src_fi);
407
408         omap_set_dma_dest_params(lch, params->dst_port,
409                                  params->dst_amode, params->dst_start,
410                                  params->dst_ei, params->dst_fi);
411         if (params->read_prio || params->write_prio)
412                 omap_dma_set_prio_lch(lch, params->read_prio,
413                                       params->write_prio);
414 }
415 EXPORT_SYMBOL(omap_set_dma_params);
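
/*
 * Illustrative sketch (not from the original source): the field names
 * below are exactly the ones omap_set_dma_params() dereferences above;
 * the address-mode and data-type constants are assumed from <plat/dma.h>,
 * and src_phys/dst_phys stand in for the caller's physical addresses.
 *
 *	struct omap_dma_channel_params params = {
 *		.data_type	= OMAP_DMA_DATA_TYPE_S32,
 *		.elem_count	= 1024,
 *		.frame_count	= 1,
 *		.src_amode	= OMAP_DMA_AMODE_POST_INC,
 *		.src_start	= src_phys,
 *		.dst_amode	= OMAP_DMA_AMODE_POST_INC,
 *		.dst_start	= dst_phys,
 *	};
 *
 *	omap_set_dma_params(lch, &params);
 */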
416
417 void omap_set_dma_src_index(int lch, int eidx, int fidx)
418 {
419         if (cpu_class_is_omap2())
420                 return;
421
422         p->dma_write(eidx, CSEI, lch);
423         p->dma_write(fidx, CSFI, lch);
424 }
425 EXPORT_SYMBOL(omap_set_dma_src_index);
426
427 void omap_set_dma_src_data_pack(int lch, int enable)
428 {
429         u32 l;
430
431         l = p->dma_read(CSDP, lch);
432         l &= ~(1 << 6);
433         if (enable)
434                 l |= (1 << 6);
435         p->dma_write(l, CSDP, lch);
436 }
437 EXPORT_SYMBOL(omap_set_dma_src_data_pack);
438
439 void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
440 {
441         unsigned int burst = 0;
442         u32 l;
443
444         l = p->dma_read(CSDP, lch);
445         l &= ~(0x03 << 7);
446
447         switch (burst_mode) {
448         case OMAP_DMA_DATA_BURST_DIS:
449                 break;
450         case OMAP_DMA_DATA_BURST_4:
451                 if (cpu_class_is_omap2())
452                         burst = 0x1;
453                 else
454                         burst = 0x2;
455                 break;
456         case OMAP_DMA_DATA_BURST_8:
457                 if (cpu_class_is_omap2()) {
458                         burst = 0x2;
459                         break;
460                 }
461                 /*
462                  * not supported by current hardware on OMAP1
463                  * w |= (0x03 << 7);
464                  * fall through
465                  */
466         case OMAP_DMA_DATA_BURST_16:
467                 if (cpu_class_is_omap2()) {
468                         burst = 0x3;
469                         break;
470                 }
471                 /*
472                  * OMAP1 doesn't support burst 16
473                  * fall through
474                  */
475         default:
476                 BUG();
477         }
478
479         l |= (burst << 7);
480         p->dma_write(l, CSDP, lch);
481 }
482 EXPORT_SYMBOL(omap_set_dma_src_burst_mode);
483
484 /* Note that dest_port is only for OMAP1 */
485 void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
486                               unsigned long dest_start,
487                               int dst_ei, int dst_fi)
488 {
489         u32 l;
490
491         if (cpu_class_is_omap1()) {
492                 l = p->dma_read(CSDP, lch);
493                 l &= ~(0x1f << 9);
494                 l |= dest_port << 9;
495                 p->dma_write(l, CSDP, lch);
496         }
497
498         l = p->dma_read(CCR, lch);
499         l &= ~(0x03 << 14);
500         l |= dest_amode << 14;
501         p->dma_write(l, CCR, lch);
502
503         p->dma_write(dest_start, CDSA, lch);
504
505         p->dma_write(dst_ei, CDEI, lch);
506         p->dma_write(dst_fi, CDFI, lch);
507 }
508 EXPORT_SYMBOL(omap_set_dma_dest_params);
509
510 void omap_set_dma_dest_index(int lch, int eidx, int fidx)
511 {
512         if (cpu_class_is_omap2())
513                 return;
514
515         p->dma_write(eidx, CDEI, lch);
516         p->dma_write(fidx, CDFI, lch);
517 }
518 EXPORT_SYMBOL(omap_set_dma_dest_index);
519
520 void omap_set_dma_dest_data_pack(int lch, int enable)
521 {
522         u32 l;
523
524         l = p->dma_read(CSDP, lch);
525         l &= ~(1 << 13);
526         if (enable)
527                 l |= 1 << 13;
528         p->dma_write(l, CSDP, lch);
529 }
530 EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
531
532 void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
533 {
534         unsigned int burst = 0;
535         u32 l;
536
537         l = p->dma_read(CSDP, lch);
538         l &= ~(0x03 << 14);
539
540         switch (burst_mode) {
541         case OMAP_DMA_DATA_BURST_DIS:
542                 break;
543         case OMAP_DMA_DATA_BURST_4:
544                 if (cpu_class_is_omap2())
545                         burst = 0x1;
546                 else
547                         burst = 0x2;
548                 break;
549         case OMAP_DMA_DATA_BURST_8:
550                 if (cpu_class_is_omap2())
551                         burst = 0x2;
552                 else
553                         burst = 0x3;
554                 break;
555         case OMAP_DMA_DATA_BURST_16:
556                 if (cpu_class_is_omap2()) {
557                         burst = 0x3;
558                         break;
559                 }
560                 /*
561                  * OMAP1 doesn't support burst 16
562                  * fall through
563                  */
564         default:
565                 printk(KERN_ERR "Invalid DMA burst mode\n");
566                 BUG();
567                 return;
568         }
569         l |= (burst << 14);
570         p->dma_write(l, CSDP, lch);
571 }
572 EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
573
574 static inline void omap_enable_channel_irq(int lch)
575 {
576         /* Clear CSR */
577         if (cpu_class_is_omap1())
578                 p->dma_read(CSR, lch);
579         else
580                 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
581
582         /* Enable some nice interrupts. */
583         p->dma_write(dma_chan[lch].enabled_irqs, CICR, lch);
584 }
585
586 static inline void omap_disable_channel_irq(int lch)
587 {
588         /* disable channel interrupts */
589         p->dma_write(0, CICR, lch);
590         /* Clear CSR */
591         if (cpu_class_is_omap1())
592                 p->dma_read(CSR, lch);
593         else
594                 p->dma_write(OMAP2_DMA_CSR_CLEAR_MASK, CSR, lch);
595 }
596
597 void omap_enable_dma_irq(int lch, u16 bits)
598 {
599         dma_chan[lch].enabled_irqs |= bits;
600 }
601 EXPORT_SYMBOL(omap_enable_dma_irq);
602
603 void omap_disable_dma_irq(int lch, u16 bits)
604 {
605         dma_chan[lch].enabled_irqs &= ~bits;
606 }
607 EXPORT_SYMBOL(omap_disable_dma_irq);
608
609 static inline void enable_lnk(int lch)
610 {
611         u32 l;
612
613         l = p->dma_read(CLNK_CTRL, lch);
614
615         if (cpu_class_is_omap1())
616                 l &= ~(1 << 14);
617
618         /* Set the ENABLE_LNK bits */
619         if (dma_chan[lch].next_lch != -1)
620                 l = dma_chan[lch].next_lch | (1 << 15);
621
622 #ifndef CONFIG_ARCH_OMAP1
623         if (cpu_class_is_omap2())
624                 if (dma_chan[lch].next_linked_ch != -1)
625                         l = dma_chan[lch].next_linked_ch | (1 << 15);
626 #endif
627
628         p->dma_write(l, CLNK_CTRL, lch);
629 }
630
631 static inline void disable_lnk(int lch)
632 {
633         u32 l;
634
635         l = p->dma_read(CLNK_CTRL, lch);
636
637         /* Disable interrupts */
638         omap_disable_channel_irq(lch);
639
640         if (cpu_class_is_omap1()) {
641                 /* Set the STOP_LNK bit */
642                 l |= 1 << 14;
643         }
644
645         if (cpu_class_is_omap2()) {
646                 /* Clear the ENABLE_LNK bit */
647                 l &= ~(1 << 15);
648         }
649
650         p->dma_write(l, CLNK_CTRL, lch);
651         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
652 }
653
654 static inline void omap2_enable_irq_lch(int lch)
655 {
656         u32 val;
657         unsigned long flags;
658
659         if (!cpu_class_is_omap2())
660                 return;
661
662         spin_lock_irqsave(&dma_chan_lock, flags);
663         /* clear IRQ STATUS */
664         p->dma_write(1 << lch, IRQSTATUS_L0, lch);
665         /* Enable interrupt */
666         val = p->dma_read(IRQENABLE_L0, lch);
667         val |= 1 << lch;
668         p->dma_write(val, IRQENABLE_L0, lch);
669         spin_unlock_irqrestore(&dma_chan_lock, flags);
670 }
671
672 static inline void omap2_disable_irq_lch(int lch)
673 {
674         u32 val;
675         unsigned long flags;
676
677         if (!cpu_class_is_omap2())
678                 return;
679
680         spin_lock_irqsave(&dma_chan_lock, flags);
681         /* Disable interrupt */
682         val = p->dma_read(IRQENABLE_L0, lch);
683         val &= ~(1 << lch);
684         p->dma_write(val, IRQENABLE_L0, lch);
685         /* clear IRQ STATUS */
686         p->dma_write(1 << lch, IRQSTATUS_L0, lch);
687         spin_unlock_irqrestore(&dma_chan_lock, flags);
688 }
689
690 int omap_request_dma(int dev_id, const char *dev_name,
691                      void (*callback)(int lch, u16 ch_status, void *data),
692                      void *data, int *dma_ch_out)
693 {
694         int ch, free_ch = -1;
695         unsigned long flags;
696         struct omap_dma_lch *chan;
697
698         spin_lock_irqsave(&dma_chan_lock, flags);
699         for (ch = 0; ch < dma_chan_count; ch++) {
700                 if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
701                         free_ch = ch;
702                         if (dev_id == 0)
703                                 break;
704                 }
705         }
706         if (free_ch == -1) {
707                 spin_unlock_irqrestore(&dma_chan_lock, flags);
708                 return -EBUSY;
709         }
710         chan = dma_chan + free_ch;
711         chan->dev_id = dev_id;
712
713         if (p->clear_lch_regs)
714                 p->clear_lch_regs(free_ch);
715
716         if (cpu_class_is_omap2())
717                 omap_clear_dma(free_ch);
718
719         spin_unlock_irqrestore(&dma_chan_lock, flags);
720
721         chan->dev_name = dev_name;
722         chan->callback = callback;
723         chan->data = data;
724         chan->flags = 0;
725
726 #ifndef CONFIG_ARCH_OMAP1
727         if (cpu_class_is_omap2()) {
728                 chan->chain_id = -1;
729                 chan->next_linked_ch = -1;
730         }
731 #endif
732
733         chan->enabled_irqs = OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
734
735         if (cpu_class_is_omap1())
736                 chan->enabled_irqs |= OMAP1_DMA_TOUT_IRQ;
737         else if (cpu_class_is_omap2())
738                 chan->enabled_irqs |= OMAP2_DMA_MISALIGNED_ERR_IRQ |
739                         OMAP2_DMA_TRANS_ERR_IRQ;
740
741         if (cpu_is_omap16xx()) {
742                 /* If the sync device is set, configure it dynamically. */
743                 if (dev_id != 0) {
744                         set_gdma_dev(free_ch + 1, dev_id);
745                         dev_id = free_ch + 1;
746                 }
747                 /*
748                  * Disable the 1510 compatibility mode and set the sync device
749                  * id.
750                  */
751                 p->dma_write(dev_id | (1 << 10), CCR, free_ch);
752         } else if (cpu_is_omap7xx() || cpu_is_omap15xx()) {
753                 p->dma_write(dev_id, CCR, free_ch);
754         }
755
756         if (cpu_class_is_omap2()) {
757                 omap_enable_channel_irq(free_ch);
758                 omap2_enable_irq_lch(free_ch);
759         }
760
761         *dma_ch_out = free_ch;
762
763         return 0;
764 }
765 EXPORT_SYMBOL(omap_request_dma);
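
/*
 * Illustrative usage sketch (hypothetical client, not part of this file):
 * pass a sync device id (0 for a purely software-triggered channel), a
 * name, a completion callback and its cookie (here a hypothetical
 * struct completion "done"); the allocated logical channel is returned
 * through dma_ch_out and must eventually be released with omap_free_dma().
 *
 *	static void my_dma_cb(int lch, u16 ch_status, void *data)
 *	{
 *		complete(data);
 *	}
 *
 *	int lch, ret;
 *
 *	ret = omap_request_dma(0, "my-driver", my_dma_cb, &done, &lch);
 *	if (ret)
 *		return ret;
 *	...
 *	omap_free_dma(lch);
 */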
766
767 void omap_free_dma(int lch)
768 {
769         unsigned long flags;
770
771         if (dma_chan[lch].dev_id == -1) {
772                 pr_err("omap_dma: trying to free unallocated DMA channel %d\n",
773                        lch);
774                 return;
775         }
776
777         /* Disable interrupt for logical channel */
778         if (cpu_class_is_omap2())
779                 omap2_disable_irq_lch(lch);
780
781         /* Disable all DMA interrupts for the channel. */
782         omap_disable_channel_irq(lch);
783
784         /* Make sure the DMA transfer is stopped. */
785         p->dma_write(0, CCR, lch);
786
787         /* Clear registers */
788         if (cpu_class_is_omap2())
789                 omap_clear_dma(lch);
790
791         spin_lock_irqsave(&dma_chan_lock, flags);
792         dma_chan[lch].dev_id = -1;
793         dma_chan[lch].next_lch = -1;
794         dma_chan[lch].callback = NULL;
795         spin_unlock_irqrestore(&dma_chan_lock, flags);
796 }
797 EXPORT_SYMBOL(omap_free_dma);
798
799 /**
800  * @brief omap_dma_set_global_params : Set global priority settings for DMA
801  *
802  * @param arb_rate
803  * @param max_fifo_depth
804  * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
805  *                                                 DMA_THREAD_RESERVE_ONET
806  *                                                 DMA_THREAD_RESERVE_TWOT
807  *                                                 DMA_THREAD_RESERVE_THREET
808  */
809 void
810 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
811 {
812         u32 reg;
813
814         if (!cpu_class_is_omap2()) {
815                 printk(KERN_ERR "FIXME: no %s on 15xx/16xx\n", __func__);
816                 return;
817         }
818
819         if (max_fifo_depth == 0)
820                 max_fifo_depth = 1;
821         if (arb_rate == 0)
822                 arb_rate = 1;
823
824         reg = 0xff & max_fifo_depth;
825         reg |= (0x3 & tparams) << 12;
826         reg |= (arb_rate & 0xff) << 16;
827
828         p->dma_write(reg, GCR, 0);
829 }
830 EXPORT_SYMBOL(omap_dma_set_global_params);
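
/*
 * Illustrative sketch (values are only an example, not taken from this
 * file): keep the minimum arbitration rate, allow a FIFO depth of 32 and
 * reserve no extra threads, using the constant named in the kerneldoc
 * above:
 *
 *	omap_dma_set_global_params(1, 32, DMA_THREAD_RESERVE_NORM);
 */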
831
832 /**
833  * @brief omap_dma_set_prio_lch : Set channel-wise priority settings
834  *
835  * @param lch
836  * @param read_prio - Read priority
837  * @param write_prio - Write priority
838  * Both of the above can be set with one of the following values :
839  *      DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
840  */
841 int
842 omap_dma_set_prio_lch(int lch, unsigned char read_prio,
843                       unsigned char write_prio)
844 {
845         u32 l;
846
847         if (unlikely((lch < 0 || lch >= dma_lch_count))) {
848                 printk(KERN_ERR "Invalid channel id\n");
849                 return -EINVAL;
850         }
851         l = p->dma_read(CCR, lch);
852         l &= ~((1 << 6) | (1 << 26));
853         if (cpu_class_is_omap2() && !cpu_is_omap242x())
854                 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
855         else
856                 l |= ((read_prio & 0x1) << 6);
857
858         p->dma_write(l, CCR, lch);
859
860         return 0;
861 }
862 EXPORT_SYMBOL(omap_dma_set_prio_lch);
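
/*
 * Illustrative sketch: raise the read priority of a latency-sensitive
 * channel while leaving writes at low priority, using the constants the
 * kerneldoc above names:
 *
 *	omap_dma_set_prio_lch(lch, DMA_CH_PRIO_HIGH, DMA_CH_PRIO_LOW);
 */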
863
864 /*
865  * Clears any DMA state so the DMA engine is ready to restart with new buffers
866  * through omap_start_dma(). Any buffers in flight are discarded.
867  */
868 void omap_clear_dma(int lch)
869 {
870         unsigned long flags;
871
872         local_irq_save(flags);
873         p->clear_dma(lch);
874         local_irq_restore(flags);
875 }
876 EXPORT_SYMBOL(omap_clear_dma);
877
878 void omap_start_dma(int lch)
879 {
880         u32 l;
881
882         /*
883          * The CPC/CDAC register needs to be initialized to zero
884          * before starting the DMA transfer.
885          */
886         if (cpu_is_omap15xx())
887                 p->dma_write(0, CPC, lch);
888         else
889                 p->dma_write(0, CDAC, lch);
890
891         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
892                 int next_lch, cur_lch;
893                 char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
894
895                 dma_chan_link_map[lch] = 1;
896                 /* Set the link register of the first channel */
897                 enable_lnk(lch);
898
899                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
900                 cur_lch = dma_chan[lch].next_lch;
901                 do {
902                         next_lch = dma_chan[cur_lch].next_lch;
903
904                         /* The loop case: we've been here already */
905                         if (dma_chan_link_map[cur_lch])
906                                 break;
907                         /* Mark the current channel */
908                         dma_chan_link_map[cur_lch] = 1;
909
910                         enable_lnk(cur_lch);
911                         omap_enable_channel_irq(cur_lch);
912
913                         cur_lch = next_lch;
914                 } while (next_lch != -1);
915         } else if (IS_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS))
916                 p->dma_write(lch, CLNK_CTRL, lch);
917
918         omap_enable_channel_irq(lch);
919
920         l = p->dma_read(CCR, lch);
921
922         if (IS_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING))
923                         l |= OMAP_DMA_CCR_BUFFERING_DISABLE;
924         l |= OMAP_DMA_CCR_EN;
925
926         /*
927          * As dma_write() uses IO accessors which are weakly ordered, there
928          * is no guarantee that data in coherent DMA memory will be visible
929          * to the DMA device.  Add a memory barrier here to ensure that any
930          * such data is visible prior to enabling DMA.
931          */
932         mb();
933         p->dma_write(l, CCR, lch);
934
935         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
936 }
937 EXPORT_SYMBOL(omap_start_dma);
938
939 void omap_stop_dma(int lch)
940 {
941         u32 l;
942
943         /* Disable all interrupts on the channel */
944         omap_disable_channel_irq(lch);
945
946         l = p->dma_read(CCR, lch);
947         if (IS_DMA_ERRATA(DMA_ERRATA_i541) &&
948                         (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) {
949                 int i = 0;
950                 u32 sys_cf;
951
952                 /* Configure No-Standby */
953                 l = p->dma_read(OCP_SYSCONFIG, lch);
954                 sys_cf = l;
955                 l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK;
956                 l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
957                 p->dma_write(l , OCP_SYSCONFIG, 0);
958
959                 l = p->dma_read(CCR, lch);
960                 l &= ~OMAP_DMA_CCR_EN;
961                 p->dma_write(l, CCR, lch);
962
963                 /* Wait for sDMA FIFO drain */
964                 l = p->dma_read(CCR, lch);
965                 while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE |
966                                         OMAP_DMA_CCR_WR_ACTIVE))) {
967                         udelay(5);
968                         i++;
969                         l = p->dma_read(CCR, lch);
970                 }
971                 if (i >= 100)
972                         printk(KERN_ERR "DMA drain did not complete on "
973                                         "lch %d\n", lch);
974                 /* Restore OCP_SYSCONFIG */
975                 p->dma_write(sys_cf, OCP_SYSCONFIG, lch);
976         } else {
977                 l &= ~OMAP_DMA_CCR_EN;
978                 p->dma_write(l, CCR, lch);
979         }
980
981         /*
982          * Ensure that data transferred by DMA is visible to any access
983          * after DMA has been disabled.  This is important for coherent
984          * DMA regions.
985          */
986         mb();
987
988         if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
989                 int next_lch, cur_lch = lch;
990                 char dma_chan_link_map[MAX_LOGICAL_DMA_CH_COUNT];
991
992                 memset(dma_chan_link_map, 0, sizeof(dma_chan_link_map));
993                 do {
994                         /* The loop case: we've been here already */
995                         if (dma_chan_link_map[cur_lch])
996                                 break;
997                         /* Mark the current channel */
998                         dma_chan_link_map[cur_lch] = 1;
999
1000                         disable_lnk(cur_lch);
1001
1002                         next_lch = dma_chan[cur_lch].next_lch;
1003                         cur_lch = next_lch;
1004                 } while (next_lch != -1);
1005         }
1006
1007         dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
1008 }
1009 EXPORT_SYMBOL(omap_stop_dma);
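
/*
 * Illustrative flow (sketch, not from the original source): once a
 * channel has been requested and programmed, a transfer is typically
 * kicked off and torn down like this:
 *
 *	omap_start_dma(lch);
 *	...			wait for the completion callback
 *	omap_stop_dma(lch);
 *	omap_free_dma(lch);
 */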
1010
1011 /*
1012  * Allows changing the DMA callback function or data. This may be needed if
1013  * the driver shares a single DMA channel for multiple dma triggers.
1014  */
1015 int omap_set_dma_callback(int lch,
1016                           void (*callback)(int lch, u16 ch_status, void *data),
1017                           void *data)
1018 {
1019         unsigned long flags;
1020
1021         if (lch < 0)
1022                 return -ENODEV;
1023
1024         spin_lock_irqsave(&dma_chan_lock, flags);
1025         if (dma_chan[lch].dev_id == -1) {
1026                 printk(KERN_ERR "cannot set DMA callback for unallocated channel\n");
1027                 spin_unlock_irqrestore(&dma_chan_lock, flags);
1028                 return -EINVAL;
1029         }
1030         dma_chan[lch].callback = callback;
1031         dma_chan[lch].data = data;
1032         spin_unlock_irqrestore(&dma_chan_lock, flags);
1033
1034         return 0;
1035 }
1036 EXPORT_SYMBOL(omap_set_dma_callback);
1037
1038 /*
1039  * Returns current physical source address for the given DMA channel.
1040  * If the channel is running, the caller must disable interrupts prior to
1041  * calling this function and process the returned value before re-enabling
1042  * interrupts, to prevent races with the interrupt handler. Note that in
1043  * continuous mode there is a chance of CSSA_L register overflow between
1044  * the two reads, resulting in an incorrect return value.
1045  */
1046 dma_addr_t omap_get_dma_src_pos(int lch)
1047 {
1048         dma_addr_t offset = 0;
1049
1050         if (cpu_is_omap15xx())
1051                 offset = p->dma_read(CPC, lch);
1052         else
1053                 offset = p->dma_read(CSAC, lch);
1054
1055         if (IS_DMA_ERRATA(DMA_ERRATA_3_3) && offset == 0)
1056                 offset = p->dma_read(CSAC, lch);
1057
1058         if (!cpu_is_omap15xx()) {
1059                 /*
1060                  * CDAC == 0 indicates that the DMA transfer on the channel has
1061                  * not been started (no data has been transferred so far).
1062                  * Return the programmed source start address in this case.
1063                  */
1064                 if (likely(p->dma_read(CDAC, lch)))
1065                         offset = p->dma_read(CSAC, lch);
1066                 else
1067                         offset = p->dma_read(CSSA, lch);
1068         }
1069
1070         if (cpu_class_is_omap1())
1071                 offset |= (p->dma_read(CSSA, lch) & 0xFFFF0000);
1072
1073         return offset;
1074 }
1075 EXPORT_SYMBOL(omap_get_dma_src_pos);
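
/*
 * Illustrative sketch of the locking the comment above asks for: sample
 * the source position of a running channel with interrupts disabled so
 * the read cannot race the DMA interrupt handler:
 *
 *	unsigned long flags;
 *	dma_addr_t pos;
 *
 *	local_irq_save(flags);
 *	pos = omap_get_dma_src_pos(lch);
 *	local_irq_restore(flags);
 */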
1076
1077 /*
1078  * Returns current physical destination address for the given DMA channel.
1079  * If the channel is running, the caller must disable interrupts prior to
1080  * calling this function and process the returned value before re-enabling
1081  * interrupts, to prevent races with the interrupt handler. Note that in
1082  * continuous mode there is a chance of CDSA_L register overflow between
1083  * the two reads, resulting in an incorrect return value.
1084  */
1085 dma_addr_t omap_get_dma_dst_pos(int lch)
1086 {
1087         dma_addr_t offset = 0;
1088
1089         if (cpu_is_omap15xx())
1090                 offset = p->dma_read(CPC, lch);
1091         else
1092                 offset = p->dma_read(CDAC, lch);
1093
1094         /*
1095          * omap 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
1096          * read before the DMA controller finished disabling the channel.
1097          */
1098         if (!cpu_is_omap15xx() && offset == 0) {
1099                 offset = p->dma_read(CDAC, lch);
1100                 /*
1101                  * CDAC == 0 indicates that the DMA transfer on the channel has
1102                  * not been started (no data has been transferred so far).
1103                  * Return the programmed destination start address in this case.
1104                  */
1105                 if (unlikely(!offset))
1106                         offset = p->dma_read(CDSA, lch);
1107         }
1108
1109         if (cpu_class_is_omap1())
1110                 offset |= (p->dma_read(CDSA, lch) & 0xFFFF0000);
1111
1112         return offset;
1113 }
1114 EXPORT_SYMBOL(omap_get_dma_dst_pos);
1115
1116 int omap_get_dma_active_status(int lch)
1117 {
1118         return (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN) != 0;
1119 }
1120 EXPORT_SYMBOL(omap_get_dma_active_status);
1121
1122 int omap_dma_running(void)
1123 {
1124         int lch;
1125
1126         if (cpu_class_is_omap1())
1127                 if (omap_lcd_dma_running())
1128                         return 1;
1129
1130         for (lch = 0; lch < dma_chan_count; lch++)
1131                 if (p->dma_read(CCR, lch) & OMAP_DMA_CCR_EN)
1132                         return 1;
1133
1134         return 0;
1135 }
1136
1137 /*
1138  * The lch_queue DMA will start right after the lch_head one finishes.
1139  * For this DMA link to start, you still need to start (see omap_start_dma)
1140  * the first one. That will fire up the entire queue.
1141  */
1142 void omap_dma_link_lch(int lch_head, int lch_queue)
1143 {
1144         if (omap_dma_in_1510_mode()) {
1145                 if (lch_head == lch_queue) {
1146                         p->dma_write(p->dma_read(CCR, lch_head) | (3 << 8),
1147                                                                 CCR, lch_head);
1148                         return;
1149                 }
1150                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1151                 BUG();
1152                 return;
1153         }
1154
1155         if ((dma_chan[lch_head].dev_id == -1) ||
1156             (dma_chan[lch_queue].dev_id == -1)) {
1157                 printk(KERN_ERR "omap_dma: trying to link "
1158                        "non-requested channels\n");
1159                 dump_stack();
1160         }
1161
1162         dma_chan[lch_head].next_lch = lch_queue;
1163 }
1164 EXPORT_SYMBOL(omap_dma_link_lch);
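
/*
 * Illustrative sketch (not part of the driver): with two requested and
 * programmed channels, queue lch2 behind lch1 and start only the head;
 * the hardware link then fires lch2 when lch1 completes:
 *
 *	omap_dma_link_lch(lch1, lch2);
 *	omap_start_dma(lch1);
 */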
1165
1166 /*
1167  * Once the DMA queue is stopped, we can destroy it.
1168  */
1169 void omap_dma_unlink_lch(int lch_head, int lch_queue)
1170 {
1171         if (omap_dma_in_1510_mode()) {
1172                 if (lch_head == lch_queue) {
1173                         p->dma_write(p->dma_read(CCR, lch_head) & ~(3 << 8),
1174                                                                 CCR, lch_head);
1175                         return;
1176                 }
1177                 printk(KERN_ERR "DMA linking is not supported in 1510 mode\n");
1178                 BUG();
1179                 return;
1180         }
1181
1182         if (dma_chan[lch_head].next_lch != lch_queue ||
1183             dma_chan[lch_head].next_lch == -1) {
1184                 printk(KERN_ERR "omap_dma: trying to unlink "
1185                        "non-linked channels\n");
1186                 dump_stack();
1187         }
1188
1189         if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
1190             (dma_chan[lch_queue].flags & OMAP_DMA_ACTIVE)) {
1191                 printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
1192                        "before unlinking\n");
1193                 dump_stack();
1194         }
1195
1196         dma_chan[lch_head].next_lch = -1;
1197 }
1198 EXPORT_SYMBOL(omap_dma_unlink_lch);
1199
1200 #ifndef CONFIG_ARCH_OMAP1
1201 /* Create a chain of DMA channels */
1202 static void create_dma_lch_chain(int lch_head, int lch_queue)
1203 {
1204         u32 l;
1205
1206         /* Check if this is the first link in chain */
1207         if (dma_chan[lch_head].next_linked_ch == -1) {
1208                 dma_chan[lch_head].next_linked_ch = lch_queue;
1209                 dma_chan[lch_head].prev_linked_ch = lch_queue;
1210                 dma_chan[lch_queue].next_linked_ch = lch_head;
1211                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1212         }
1213
1214         /* a link exists, link the new channel in circular chain */
1215         else {
1216                 dma_chan[lch_queue].next_linked_ch =
1217                                         dma_chan[lch_head].next_linked_ch;
1218                 dma_chan[lch_queue].prev_linked_ch = lch_head;
1219                 dma_chan[lch_head].next_linked_ch = lch_queue;
1220                 dma_chan[dma_chan[lch_queue].next_linked_ch].prev_linked_ch =
1221                                         lch_queue;
1222         }
1223
1224         l = p->dma_read(CLNK_CTRL, lch_head);
1225         l &= ~(0x1f);
1226         l |= lch_queue;
1227         p->dma_write(l, CLNK_CTRL, lch_head);
1228
1229         l = p->dma_read(CLNK_CTRL, lch_queue);
1230         l &= ~(0x1f);
1231         l |= (dma_chan[lch_queue].next_linked_ch);
1232         p->dma_write(l, CLNK_CTRL, lch_queue);
1233 }
1234
1235 /**
1236  * @brief omap_request_dma_chain : Request a chain of DMA channels
1237  *
1238  * @param dev_id - Device id using the dma channel
1239  * @param dev_name - Device name
1240  * @param callback - Callback function
1241  * @param chain_id - Returned chain id (the first channel in the chain)
1242  * @param no_of_chans - Number of channels requested
1243  * @param chain_mode - Dynamic or static chaining : OMAP_DMA_STATIC_CHAIN
1244  *                                                  OMAP_DMA_DYNAMIC_CHAIN
1245  * @param params - Channel parameters
1246  *
1247  * @return - Success : 0
1248  *           Failure: -EINVAL/-ENOMEM
1249  */
1250 int omap_request_dma_chain(int dev_id, const char *dev_name,
1251                            void (*callback) (int lch, u16 ch_status,
1252                                              void *data),
1253                            int *chain_id, int no_of_chans, int chain_mode,
1254                            struct omap_dma_channel_params params)
1255 {
1256         int *channels;
1257         int i, err;
1258
1259         /* Is the chain mode valid ? */
1260         if (chain_mode != OMAP_DMA_STATIC_CHAIN
1261                         && chain_mode != OMAP_DMA_DYNAMIC_CHAIN) {
1262                 printk(KERN_ERR "Invalid chain mode requested\n");
1263                 return -EINVAL;
1264         }
1265
1266         if (unlikely((no_of_chans < 1
1267                         || no_of_chans > dma_lch_count))) {
1268                 printk(KERN_ERR "Invalid Number of channels requested\n");
1269                 return -EINVAL;
1270         }
1271
1272         /*
1273          * Allocate a queue to maintain the status of the channels
1274          * in the chain
1275          */
1276         channels = kmalloc(sizeof(*channels) * no_of_chans, GFP_KERNEL);
1277         if (channels == NULL) {
1278                 printk(KERN_ERR "omap_dma: No memory for channel queue\n");
1279                 return -ENOMEM;
1280         }
1281
1282         /* request and reserve DMA channels for the chain */
1283         for (i = 0; i < no_of_chans; i++) {
1284                 err = omap_request_dma(dev_id, dev_name,
1285                                         callback, NULL, &channels[i]);
1286                 if (err < 0) {
1287                         int j;
1288                         for (j = 0; j < i; j++)
1289                                 omap_free_dma(channels[j]);
1290                         kfree(channels);
1291                         printk(KERN_ERR "omap_dma: Request failed %d\n", err);
1292                         return err;
1293                 }
1294                 dma_chan[channels[i]].prev_linked_ch = -1;
1295                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1296
1297                 /*
1298                  * Allowing client drivers to set common parameters now,
1299                  * so that later only relevant (src_start, dest_start
1300                  * and element count) can be set
1301                  */
1302                 omap_set_dma_params(channels[i], &params);
1303         }
1304
1305         *chain_id = channels[0];
1306         dma_linked_lch[*chain_id].linked_dmach_q = channels;
1307         dma_linked_lch[*chain_id].chain_mode = chain_mode;
1308         dma_linked_lch[*chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1309         dma_linked_lch[*chain_id].no_of_lchs_linked = no_of_chans;
1310
1311         for (i = 0; i < no_of_chans; i++)
1312                 dma_chan[channels[i]].chain_id = *chain_id;
1313
1314         /* Reset the Queue pointers */
1315         OMAP_DMA_CHAIN_QINIT(*chain_id);
1316
1317         /* Set up the chain */
1318         if (no_of_chans == 1)
1319                 create_dma_lch_chain(channels[0], channels[0]);
1320         else {
1321                 for (i = 0; i < (no_of_chans - 1); i++)
1322                         create_dma_lch_chain(channels[i], channels[i + 1]);
1323         }
1324
1325         return 0;
1326 }
1327 EXPORT_SYMBOL(omap_request_dma_chain);
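
/*
 * Illustrative sketch (hypothetical values): request a two-channel
 * dynamic chain that shares one set of common parameters; "params" is a
 * struct omap_dma_channel_params filled in as for omap_set_dma_params():
 *
 *	int chain_id, ret;
 *
 *	ret = omap_request_dma_chain(0, "my-chain", my_dma_cb, &chain_id,
 *				     2, OMAP_DMA_DYNAMIC_CHAIN, params);
 *	if (ret)
 *		return ret;
 */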
1328
1329 /**
1330  * @brief omap_modify_dma_chain_params : Modify the chain's params - Modify the
1331  * params after setting them. Don't do this while DMA is running!
1332  *
1333  * @param chain_id - Chained logical channel id.
1334  * @param params
1335  *
1336  * @return - Success : 0
1337  *           Failure : -EINVAL
1338  */
1339 int omap_modify_dma_chain_params(int chain_id,
1340                                 struct omap_dma_channel_params params)
1341 {
1342         int *channels;
1343         u32 i;
1344
1345         /* Check for input params */
1346         if (unlikely((chain_id < 0
1347                         || chain_id >= dma_lch_count))) {
1348                 printk(KERN_ERR "Invalid chain id\n");
1349                 return -EINVAL;
1350         }
1351
1352         /* Check if the chain exists */
1353         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1354                 printk(KERN_ERR "Chain doesn't exist\n");
1355                 return -EINVAL;
1356         }
1357         channels = dma_linked_lch[chain_id].linked_dmach_q;
1358
1359         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1360                 /*
1361                  * Allowing client drivers to set common parameters now,
1362                  * so that later only relevant (src_start, dest_start
1363                  * and element count) can be set
1364                  */
1365                 omap_set_dma_params(channels[i], &params);
1366         }
1367
1368         return 0;
1369 }
1370 EXPORT_SYMBOL(omap_modify_dma_chain_params);
1371
1372 /**
1373  * @brief omap_free_dma_chain - Free all the logical channels in a chain.
1374  *
1375  * @param chain_id
1376  *
1377  * @return - Success : 0
1378  *           Failure : -EINVAL
1379  */
1380 int omap_free_dma_chain(int chain_id)
1381 {
1382         int *channels;
1383         u32 i;
1384
1385         /* Check for input params */
1386         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1387                 printk(KERN_ERR "Invalid chain id\n");
1388                 return -EINVAL;
1389         }
1390
1391         /* Check if the chain exists */
1392         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1393                 printk(KERN_ERR "Chain doesn't exist\n");
1394                 return -EINVAL;
1395         }
1396
1397         channels = dma_linked_lch[chain_id].linked_dmach_q;
1398         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1399                 dma_chan[channels[i]].next_linked_ch = -1;
1400                 dma_chan[channels[i]].prev_linked_ch = -1;
1401                 dma_chan[channels[i]].chain_id = -1;
1402                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1403                 omap_free_dma(channels[i]);
1404         }
1405
1406         kfree(channels);
1407
1408         dma_linked_lch[chain_id].linked_dmach_q = NULL;
1409         dma_linked_lch[chain_id].chain_mode = -1;
1410         dma_linked_lch[chain_id].chain_state = -1;
1411
1412         return 0;
1413 }
1414 EXPORT_SYMBOL(omap_free_dma_chain);
1415
1416 /**
1417  * @brief omap_dma_chain_status - Check if the chain is in
1418  * active / inactive state.
1419  * @param chain_id
1420  *
1421  * @return - Success : OMAP_DMA_CHAIN_ACTIVE/OMAP_DMA_CHAIN_INACTIVE
1422  *           Failure : -EINVAL
1423  */
1424 int omap_dma_chain_status(int chain_id)
1425 {
1426         /* Check for input params */
1427         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1428                 printk(KERN_ERR "Invalid chain id\n");
1429                 return -EINVAL;
1430         }
1431
1432         /* Check if the chain exists */
1433         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1434                 printk(KERN_ERR "Chain doesn't exist\n");
1435                 return -EINVAL;
1436         }
1437         pr_debug("CHAINID=%d, qcnt=%d\n", chain_id,
1438                         dma_linked_lch[chain_id].q_count);
1439
1440         if (OMAP_DMA_CHAIN_QEMPTY(chain_id))
1441                 return OMAP_DMA_CHAIN_INACTIVE;
1442
1443         return OMAP_DMA_CHAIN_ACTIVE;
1444 }
1445 EXPORT_SYMBOL(omap_dma_chain_status);
1446
1447 /**
1448  * @brief omap_dma_chain_a_transfer - Get a free channel from a chain,
1449  * set the params and start the transfer.
1450  *
1451  * @param chain_id
1452  * @param src_start - buffer start address
1453  * @param dest_start - Dest address
1454  * @param elem_count
1455  * @param frame_count
1456  * @param callbk_data - channel callback parameter data.
1457  *
1458  * @return  - Success : 0
1459  *            Failure: -EINVAL/-EBUSY
1460  */
1461 int omap_dma_chain_a_transfer(int chain_id, int src_start, int dest_start,
1462                         int elem_count, int frame_count, void *callbk_data)
1463 {
1464         int *channels;
1465         u32 l, lch;
1466         int start_dma = 0;
1467
1468         /*
1469          * If the buffer size is less than 1 then there is
1470          * no use in starting the chain
1471          */
1472         if (elem_count < 1) {
1473                 printk(KERN_ERR "Invalid buffer size\n");
1474                 return -EINVAL;
1475         }
1476
1477         /* Check for input params */
1478         if (unlikely((chain_id < 0
1479                         || chain_id >= dma_lch_count))) {
1480                 printk(KERN_ERR "Invalid chain id\n");
1481                 return -EINVAL;
1482         }
1483
1484         /* Check if the chain exists */
1485         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1486                 printk(KERN_ERR "Chain doesn't exist\n");
1487                 return -EINVAL;
1488         }
1489
1490         /* Check if all the channels in chain are in use */
1491         if (OMAP_DMA_CHAIN_QFULL(chain_id))
1492                 return -EBUSY;
1493
1494         /* Frame count may be negative in case of indexed transfers */
1495         channels = dma_linked_lch[chain_id].linked_dmach_q;
1496
1497         /* Get a free channel */
1498         lch = channels[dma_linked_lch[chain_id].q_tail];
1499
1500         /* Store the callback data */
1501         dma_chan[lch].data = callbk_data;
1502
1503         /* Increment the q_tail */
1504         OMAP_DMA_CHAIN_INCQTAIL(chain_id);
1505
1506         /* Set the params to the free channel */
1507         if (src_start != 0)
1508                 p->dma_write(src_start, CSSA, lch);
1509         if (dest_start != 0)
1510                 p->dma_write(dest_start, CDSA, lch);
1511
1512         /* Write the buffer size */
1513         p->dma_write(elem_count, CEN, lch);
1514         p->dma_write(frame_count, CFN, lch);
1515
1516         /*
1517          * If the chain is dynamically linked,
1518          * then we may have to start the chain if its not active
1519          */
1520         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_DYNAMIC_CHAIN) {
1521
1522                 /*
1523                  * In a dynamic chain, if the chain is not started,
1524                  * queue the channel
1525                  */
1526                 if (dma_linked_lch[chain_id].chain_state ==
1527                                                 DMA_CHAIN_NOTSTARTED) {
1528                         /* Enable the link in previous channel */
1529                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1530                                                                 DMA_CH_QUEUED)
1531                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1532                         dma_chan[lch].state = DMA_CH_QUEUED;
1533                 }
1534
1535                 /*
1536                  * The chain is already started; make sure it's active,
1537                  * and if not, start the chain
1538                  */
1539                 else {
1540                         start_dma = 1;
1541
1542                         if (dma_chan[dma_chan[lch].prev_linked_ch].state ==
1543                                                         DMA_CH_STARTED) {
1544                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1545                                 dma_chan[lch].state = DMA_CH_QUEUED;
1546                                 start_dma = 0;
1547                                 if (0 == ((1 << 7) & p->dma_read(
1548                                         CCR, dma_chan[lch].prev_linked_ch))) {
1549                                         disable_lnk(dma_chan[lch].
1550                                                     prev_linked_ch);
1551                                         pr_debug("prev ch is stopped\n");
1552                                         start_dma = 1;
1553                                 }
1554                         }
1555
1556                         else if (dma_chan[dma_chan[lch].prev_linked_ch].state
1557                                                         == DMA_CH_QUEUED) {
1558                                 enable_lnk(dma_chan[lch].prev_linked_ch);
1559                                 dma_chan[lch].state = DMA_CH_QUEUED;
1560                                 start_dma = 0;
1561                         }
1562                         omap_enable_channel_irq(lch);
1563
1564                         l = p->dma_read(CCR, lch);
1565
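                        /*
                         * Keep CCR bit 25 consistent with bit 24: clear it
                         * when bit 24 is clear, set it otherwise, before the
                         * channel is (re)started below.
                         */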
1566                         if ((0 == (l & (1 << 24))))
1567                                 l &= ~(1 << 25);
1568                         else
1569                                 l |= (1 << 25);
1570                         if (start_dma == 1) {
1571                                 if (0 == (l & (1 << 7))) {
1572                                         l |= (1 << 7);
1573                                         dma_chan[lch].state = DMA_CH_STARTED;
1574                                         pr_debug("starting %d\n", lch);
1575                                         p->dma_write(l, CCR, lch);
1576                                 } else
1577                                         start_dma = 0;
1578                         } else {
1579                                 if (0 == (l & (1 << 7)))
1580                                         p->dma_write(l, CCR, lch);
1581                         }
1582                         dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
1583                 }
1584         }
1585
1586         return 0;
1587 }
1588 EXPORT_SYMBOL(omap_dma_chain_a_transfer);
1589
1590 /**
1591  * @brief omap_start_dma_chain_transfers - Start the DMA transfers of a chain
1592  *
1593  * @param chain_id
1594  *
1595  * @return - Success : 0
1596  *           Failure : -EINVAL/-EBUSY
1597  */
1598 int omap_start_dma_chain_transfers(int chain_id)
1599 {
1600         int *channels;
1601         u32 l, i;
1602
1603         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1604                 printk(KERN_ERR "Invalid chain id\n");
1605                 return -EINVAL;
1606         }
1607
1608         channels = dma_linked_lch[chain_id].linked_dmach_q;
1609
1610         if (dma_linked_lch[chain_id].chain_state == DMA_CHAIN_STARTED) {
1611                 printk(KERN_ERR "Chain is already started\n");
1612                 return -EBUSY;
1613         }
1614
1615         if (dma_linked_lch[chain_id].chain_mode == OMAP_DMA_STATIC_CHAIN) {
1616                 for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked;
1617                                                                         i++) {
1618                         enable_lnk(channels[i]);
1619                         omap_enable_channel_irq(channels[i]);
1620                 }
1621         } else {
1622                 omap_enable_channel_irq(channels[0]);
1623         }
1624
1625         l = p->dma_read(CCR, channels[0]);
1626         l |= (1 << 7);
1627         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_STARTED;
1628         dma_chan[channels[0]].state = DMA_CH_STARTED;
1629
1630         if ((0 == (l & (1 << 24))))
1631                 l &= ~(1 << 25);
1632         else
1633                 l |= (1 << 25);
1634         p->dma_write(l, CCR, channels[0]);
1635
1636         dma_chan[channels[0]].flags |= OMAP_DMA_ACTIVE;
1637
1638         return 0;
1639 }
1640 EXPORT_SYMBOL(omap_start_dma_chain_transfers);
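
/*
 * Usage sketch (illustrative only, not called by this driver): once a chain
 * has been set up (typically via omap_request_dma_chain(), defined earlier
 * in this file) and one or more transfers have been queued with
 * omap_dma_chain_a_transfer() above (the argument order shown here is an
 * assumption based on that function's body), a client driver would start
 * the chain roughly like this:
 *
 *	int ret;
 *
 *	ret = omap_dma_chain_a_transfer(chain_id, src_start, dest_start,
 *					elem_count, frame_count, cb_data);
 *	if (!ret)
 *		ret = omap_start_dma_chain_transfers(chain_id);
 *	if (ret)
 *		pr_err("could not start DMA chain %d: %d\n", chain_id, ret);
 */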
1641
1642 /**
1643  * @brief omap_stop_dma_chain_transfers - Stop the DMA transfers of a chain.
1644  *
1645  * @param chain_id
1646  *
1647  * @return - Success : 0
1648  *           Failure : -EINVAL
1649  */
1650 int omap_stop_dma_chain_transfers(int chain_id)
1651 {
1652         int *channels;
1653         u32 l, i;
1654         u32 sys_cf = 0;
1655
1656         /* Check for input params */
1657         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1658                 printk(KERN_ERR "Invalid chain id\n");
1659                 return -EINVAL;
1660         }
1661
1662         /* Check if the chain exists */
1663         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1664                 printk(KERN_ERR "Chain doesn't exist\n");
1665                 return -EINVAL;
1666         }
1667         channels = dma_linked_lch[chain_id].linked_dmach_q;
1668
1669         if (IS_DMA_ERRATA(DMA_ERRATA_i88)) {
1670                 sys_cf = p->dma_read(OCP_SYSCONFIG, 0);
1671                 l = sys_cf;
1672                 /* Clear the OCP_SYSCONFIG middle-mode field (errata i88 workaround) */
1673                 l &= ~((1 << 12)|(1 << 13));
1674                 p->dma_write(l, OCP_SYSCONFIG, 0);
1675         }
1676
1677         for (i = 0; i < dma_linked_lch[chain_id].no_of_lchs_linked; i++) {
1678
1679                 /* Stop the Channel transmission */
1680                 l = p->dma_read(CCR, channels[i]);
1681                 l &= ~(1 << 7);
1682                 p->dma_write(l, CCR, channels[i]);
1683
1684                 /* Disable the link in all the channels */
1685                 disable_lnk(channels[i]);
1686                 dma_chan[channels[i]].state = DMA_CH_NOTSTARTED;
1687
1688         }
1689         dma_linked_lch[chain_id].chain_state = DMA_CHAIN_NOTSTARTED;
1690
1691         /* Reset the Queue pointers */
1692         OMAP_DMA_CHAIN_QINIT(chain_id);
1693
1694         if (IS_DMA_ERRATA(DMA_ERRATA_i88))
1695                 p->dma_write(sys_cf, OCP_SYSCONFIG, 0);
1696
1697         return 0;
1698 }
1699 EXPORT_SYMBOL(omap_stop_dma_chain_transfers);
1700
1701 /* Get the index of the ongoing DMA in chain */
1702 /**
1703  * @brief omap_get_dma_chain_index - Get the element and frame index
1704  * of the ongoing DMA in chain
1705  *
1706  * @param chain_id
1707  * @param ei - Element index
1708  * @param fi - Frame index
1709  *
1710  * @return - Success : 0
1711  *           Failure : -EINVAL
1712  */
1713 int omap_get_dma_chain_index(int chain_id, int *ei, int *fi)
1714 {
1715         int lch;
1716         int *channels;
1717
1718         /* Check for input params */
1719         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1720                 printk(KERN_ERR "Invalid chain id\n");
1721                 return -EINVAL;
1722         }
1723
1724         /* Check if the chain exists */
1725         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1726                 printk(KERN_ERR "Chain doesn't exist\n");
1727                 return -EINVAL;
1728         }
1729         if ((!ei) || (!fi))
1730                 return -EINVAL;
1731
1732         channels = dma_linked_lch[chain_id].linked_dmach_q;
1733
1734         /* Get the current channel */
1735         lch = channels[dma_linked_lch[chain_id].q_head];
1736
1737         *ei = p->dma_read(CCEN, lch);
1738         *fi = p->dma_read(CCFN, lch);
1739
1740         return 0;
1741 }
1742 EXPORT_SYMBOL(omap_get_dma_chain_index);
1743
1744 /**
1745  * @brief omap_get_dma_chain_dst_pos - Get the destination position of the
1746  * ongoing DMA in chain
1747  *
1748  * @param chain_id
1749  *
1750  * @return - Success : Destination position
1751  *           Failure : -EINVAL
1752  */
1753 int omap_get_dma_chain_dst_pos(int chain_id)
1754 {
1755         int lch;
1756         int *channels;
1757
1758         /* Check for input params */
1759         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1760                 printk(KERN_ERR "Invalid chain id\n");
1761                 return -EINVAL;
1762         }
1763
1764         /* Check if the chain exists */
1765         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1766                 printk(KERN_ERR "Chain doesn't exist\n");
1767                 return -EINVAL;
1768         }
1769
1770         channels = dma_linked_lch[chain_id].linked_dmach_q;
1771
1772         /* Get the current channel */
1773         lch = channels[dma_linked_lch[chain_id].q_head];
1774
1775         return p->dma_read(CDAC, lch);
1776 }
1777 EXPORT_SYMBOL(omap_get_dma_chain_dst_pos);
1778
1779 /**
1780  * @brief omap_get_dma_chain_src_pos - Get the source position
1781  * of the ongoing DMA in chain
1782  * @param chain_id
1783  *
1784  * @return - Success : Source position
1785  *           Failure : -EINVAL
1786  */
1787 int omap_get_dma_chain_src_pos(int chain_id)
1788 {
1789         int lch;
1790         int *channels;
1791
1792         /* Check for input params */
1793         if (unlikely((chain_id < 0 || chain_id >= dma_lch_count))) {
1794                 printk(KERN_ERR "Invalid chain id\n");
1795                 return -EINVAL;
1796         }
1797
1798         /* Check if the chain exists */
1799         if (dma_linked_lch[chain_id].linked_dmach_q == NULL) {
1800                 printk(KERN_ERR "Chain doesn't exist\n");
1801                 return -EINVAL;
1802         }
1803
1804         channels = dma_linked_lch[chain_id].linked_dmach_q;
1805
1806         /* Get the current channel */
1807         lch = channels[dma_linked_lch[chain_id].q_head];
1808
1809         return p->dma_read(CSAC, lch);
1810 }
1811 EXPORT_SYMBOL(omap_get_dma_chain_src_pos);
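
/*
 * Progress-query sketch (illustrative only, under the same assumptions as
 * the example after omap_start_dma_chain_transfers()): a client can poll
 * how far the ongoing transfer in a chain has advanced:
 *
 *	int ei, fi;
 *
 *	if (omap_get_dma_chain_index(chain_id, &ei, &fi) == 0)
 *		pr_debug("chain %d: element %d, frame %d\n", chain_id, ei, fi);
 *	pr_debug("src 0x%08x dst 0x%08x\n",
 *		 omap_get_dma_chain_src_pos(chain_id),
 *		 omap_get_dma_chain_dst_pos(chain_id));
 *
 * Once the transfer is complete (or has to be aborted), the chain is shut
 * down with omap_stop_dma_chain_transfers(chain_id).
 */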
1812 #endif  /* ifndef CONFIG_ARCH_OMAP1 */
1813
1814 /*----------------------------------------------------------------------------*/
1815
1816 #ifdef CONFIG_ARCH_OMAP1
1817
1818 static int omap1_dma_handle_ch(int ch)
1819 {
1820         u32 csr;
1821
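
        /*
         * In 1510 mode, logical channels 6 and up have no CSR of their own:
         * their status turns up in the upper bits of the CSR of channels
         * 0..2 and is stashed in saved_csr by the code below.
         */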
1822         if (enable_1510_mode && ch >= 6) {
1823                 csr = dma_chan[ch].saved_csr;
1824                 dma_chan[ch].saved_csr = 0;
1825         } else
1826                 csr = p->dma_read(CSR, ch);
1827         if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
1828                 dma_chan[ch + 6].saved_csr = csr >> 7;
1829                 csr &= 0x7f;
1830         }
1831         if ((csr & 0x3f) == 0)
1832                 return 0;
1833         if (unlikely(dma_chan[ch].dev_id == -1)) {
1834                 printk(KERN_WARNING "Spurious interrupt from DMA channel "
1835                        "%d (CSR %04x)\n", ch, csr);
1836                 return 0;
1837         }
1838         if (unlikely(csr & OMAP1_DMA_TOUT_IRQ))
1839                 printk(KERN_WARNING "DMA timeout with device %d\n",
1840                        dma_chan[ch].dev_id);
1841         if (unlikely(csr & OMAP_DMA_DROP_IRQ))
1842                 printk(KERN_WARNING "DMA synchronization event drop occurred "
1843                        "with device %d\n", dma_chan[ch].dev_id);
1844         if (likely(csr & OMAP_DMA_BLOCK_IRQ))
1845                 dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1846         if (likely(dma_chan[ch].callback != NULL))
1847                 dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
1848
1849         return 1;
1850 }
1851
1852 static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id)
1853 {
1854         int ch = ((int) dev_id) - 1;
1855         int handled = 0;
1856
1857         for (;;) {
1858                 int handled_now = 0;
1859
1860                 handled_now += omap1_dma_handle_ch(ch);
1861                 if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
1862                         handled_now += omap1_dma_handle_ch(ch + 6);
1863                 if (!handled_now)
1864                         break;
1865                 handled += handled_now;
1866         }
1867
1868         return handled ? IRQ_HANDLED : IRQ_NONE;
1869 }
1870
1871 #else
1872 #define omap1_dma_irq_handler   NULL
1873 #endif
1874
1875 #ifdef CONFIG_ARCH_OMAP2PLUS
1876
1877 static int omap2_dma_handle_ch(int ch)
1878 {
1879         u32 status = p->dma_read(CSR, ch);
1880
1881         if (!status) {
1882                 if (printk_ratelimit())
1883                         printk(KERN_WARNING "Spurious DMA IRQ for lch %d\n",
1884                                 ch);
1885                 p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1886                 return 0;
1887         }
1888         if (unlikely(dma_chan[ch].dev_id == -1)) {
1889                 if (printk_ratelimit())
1890                         printk(KERN_WARNING "IRQ %04x for non-allocated DMA"
1891                                         " channel %d\n", status, ch);
1892                 return 0;
1893         }
1894         if (unlikely(status & OMAP_DMA_DROP_IRQ))
1895                 printk(KERN_INFO
1896                        "DMA synchronization event drop occurred with device "
1897                        "%d\n", dma_chan[ch].dev_id);
1898         if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ)) {
1899                 printk(KERN_INFO "DMA transaction error with device %d\n",
1900                        dma_chan[ch].dev_id);
1901                 if (IS_DMA_ERRATA(DMA_ERRATA_i378)) {
1902                         u32 ccr;
1903
1904                         ccr = p->dma_read(CCR, ch);
1905                         ccr &= ~OMAP_DMA_CCR_EN;
1906                         p->dma_write(ccr, CCR, ch);
1907                         dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
1908                 }
1909         }
1910         if (unlikely(status & OMAP2_DMA_SECURE_ERR_IRQ))
1911                 printk(KERN_INFO "DMA secure error with device %d\n",
1912                        dma_chan[ch].dev_id);
1913         if (unlikely(status & OMAP2_DMA_MISALIGNED_ERR_IRQ))
1914                 printk(KERN_INFO "DMA misaligned error with device %d\n",
1915                        dma_chan[ch].dev_id);
1916
1917         p->dma_write(status, CSR, ch);
1918         p->dma_write(1 << ch, IRQSTATUS_L0, ch);
1919         /* read back the register to flush the write */
1920         p->dma_read(IRQSTATUS_L0, ch);
1921
1922         /* If the channel is not chained, chain_id will be -1 */
1923         if (dma_chan[ch].chain_id != -1) {
1924                 int chain_id = dma_chan[ch].chain_id;
1925                 dma_chan[ch].state = DMA_CH_NOTSTARTED;
1926                 if (p->dma_read(CLNK_CTRL, ch) & (1 << 15))
1927                         dma_chan[dma_chan[ch].next_linked_ch].state =
1928                                                         DMA_CH_STARTED;
1929                 if (dma_linked_lch[chain_id].chain_mode ==
1930                                                 OMAP_DMA_DYNAMIC_CHAIN)
1931                         disable_lnk(ch);
1932
1933                 if (!OMAP_DMA_CHAIN_QEMPTY(chain_id))
1934                         OMAP_DMA_CHAIN_INCQHEAD(chain_id);
1935
1936                 status = p->dma_read(CSR, ch);
1937                 p->dma_write(status, CSR, ch);
1938         }
1939
1940         if (likely(dma_chan[ch].callback != NULL))
1941                 dma_chan[ch].callback(ch, status, dma_chan[ch].data);
1942
1943         return 0;
1944 }
1945
1946 /* STATUS register count is from 1-32 while ours is 0-31 */
1947 static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id)
1948 {
1949         u32 val, enable_reg;
1950         int i;
1951
1952         val = p->dma_read(IRQSTATUS_L0, 0);
1953         if (val == 0) {
1954                 if (printk_ratelimit())
1955                         printk(KERN_WARNING "Spurious DMA IRQ\n");
1956                 return IRQ_HANDLED;
1957         }
1958         enable_reg = p->dma_read(IRQENABLE_L0, 0);
1959         val &= enable_reg; /* Dispatch only relevant interrupts */
1960         for (i = 0; i < dma_lch_count && val != 0; i++) {
1961                 if (val & 1)
1962                         omap2_dma_handle_ch(i);
1963                 val >>= 1;
1964         }
1965
1966         return IRQ_HANDLED;
1967 }
1968
1969 static struct irqaction omap24xx_dma_irq = {
1970         .name = "DMA",
1971         .handler = omap2_dma_irq_handler,
1972         .flags = IRQF_DISABLED
1973 };
1974
1975 #else
1976 static struct irqaction omap24xx_dma_irq;
1977 #endif
1978
1979 /*----------------------------------------------------------------------------*/
1980
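/*
 * Save and restore the global sDMA registers (GCR, OCP_SYSCONFIG and
 * IRQENABLE_L0) across a context loss, e.g. when the power domain holding
 * the DMA module is taken down by the PM code.
 */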
1981 void omap_dma_global_context_save(void)
1982 {
1983         omap_dma_global_context.dma_irqenable_l0 =
1984                 p->dma_read(IRQENABLE_L0, 0);
1985         omap_dma_global_context.dma_ocp_sysconfig =
1986                 p->dma_read(OCP_SYSCONFIG, 0);
1987         omap_dma_global_context.dma_gcr = p->dma_read(GCR, 0);
1988 }
1989
1990 void omap_dma_global_context_restore(void)
1991 {
1992         int ch;
1993
1994         p->dma_write(omap_dma_global_context.dma_gcr, GCR, 0);
1995         p->dma_write(omap_dma_global_context.dma_ocp_sysconfig,
1996                 OCP_SYSCONFIG, 0);
1997         p->dma_write(omap_dma_global_context.dma_irqenable_l0,
1998                 IRQENABLE_L0, 0);
1999
2000         if (IS_DMA_ERRATA(DMA_ROMCODE_BUG))
2001                 p->dma_write(0x3, IRQSTATUS_L0, 0);
2002
2003         for (ch = 0; ch < dma_chan_count; ch++)
2004                 if (dma_chan[ch].dev_id != -1)
2005                         omap_clear_dma(ch);
2006 }
2007
2008 static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2009 {
2010         int ch, ret = 0;
2011         int dma_irq;
2012         char irq_name[4];
2013         int irq_rel;
2014
2015         p = pdev->dev.platform_data;
2016         if (!p) {
2017                 dev_err(&pdev->dev, "%s: System DMA initialized without"
2018                         " platform data\n", __func__);
2019                 return -EINVAL;
2020         }
2021
2022         d                       = p->dma_attr;
2023         errata                  = p->errata;
2024
2025         if ((d->dev_caps & RESERVE_CHANNEL) && omap_dma_reserve_channels
2026                         && (omap_dma_reserve_channels <= dma_lch_count))
2027                 d->lch_count    = omap_dma_reserve_channels;
2028
2029         dma_lch_count           = d->lch_count;
2030         dma_chan_count          = dma_lch_count;
2031         dma_chan                = d->chan;
2032         enable_1510_mode        = d->dev_caps & ENABLE_1510_MODE;
2033
2034         if (cpu_class_is_omap2()) {
2035                 dma_linked_lch = kzalloc(sizeof(struct dma_link_info) *
2036                                                 dma_lch_count, GFP_KERNEL);
2037                 if (!dma_linked_lch) {
2038                         ret = -ENOMEM;
2039                         goto exit_dma_lch_fail;
2040                 }
2041         }
2042
2043         spin_lock_init(&dma_chan_lock);
2044         for (ch = 0; ch < dma_chan_count; ch++) {
2045                 omap_clear_dma(ch);
2046                 if (cpu_class_is_omap2())
2047                         omap2_disable_irq_lch(ch);
2048
2049                 dma_chan[ch].dev_id = -1;
2050                 dma_chan[ch].next_lch = -1;
2051
2052                 if (ch >= 6 && enable_1510_mode)
2053                         continue;
2054
2055                 if (cpu_class_is_omap1()) {
2056                         /*
2057                          * request_irq() doesn't like dev_id (i.e. ch) being
2058                          * zero, so we have to kludge around this.
2059                          */
2060                         sprintf(&irq_name[0], "%d", ch);
2061                         dma_irq = platform_get_irq_byname(pdev, irq_name);
2062
2063                         if (dma_irq < 0) {
2064                                 ret = dma_irq;
2065                                 goto exit_dma_irq_fail;
2066                         }
2067
2068                         /* INT_DMA_LCD is handled in lcd_dma.c */
2069                         if (dma_irq == INT_DMA_LCD)
2070                                 continue;
2071
2072                         ret = request_irq(dma_irq,
2073                                         omap1_dma_irq_handler, 0, "DMA",
2074                                         (void *) (ch + 1));
2075                         if (ret != 0)
2076                                 goto exit_dma_irq_fail;
2077                 }
2078         }
2079
2080         if (cpu_class_is_omap2() && !cpu_is_omap242x())
2081                 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2082                                 DMA_DEFAULT_FIFO_DEPTH, 0);
2083
2084         if (cpu_class_is_omap2()) {
2085                 strcpy(irq_name, "0");
2086                 dma_irq = platform_get_irq_byname(pdev, irq_name);
2087                 if (dma_irq < 0) {
2088                         dev_err(&pdev->dev, "failed to get IRQ (error %d)\n", dma_irq);
2089                         goto exit_dma_lch_fail;
2090                 }
2091                 ret = setup_irq(dma_irq, &omap24xx_dma_irq);
2092                 if (ret) {
2093                         dev_err(&pdev->dev, "setup_irq() failed for IRQ %d"
2094                                 " for DMA (error %d)\n", dma_irq, ret);
2095                         goto exit_dma_lch_fail;
2096                 }
2097         }
2098
2099         /* reserve dma channels 0 and 1 in high security devices */
2100         if (cpu_is_omap34xx() &&
2101                 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
2102                 printk(KERN_INFO "Reserving DMA channels 0 and 1 for "
2103                                 "HS ROM code\n");
2104                 dma_chan[0].dev_id = 0;
2105                 dma_chan[1].dev_id = 1;
2106         }
2107         p->show_dma_caps();
2108         return 0;
2109
2110 exit_dma_irq_fail:
2111         dev_err(&pdev->dev, "unable to request IRQ %d"
2112                         " for DMA (error %d)\n", dma_irq, ret);
2113         for (irq_rel = 0; irq_rel < ch; irq_rel++) {
2114                 dma_irq = platform_get_irq(pdev, irq_rel);
2115                 free_irq(dma_irq, (void *)(irq_rel + 1));
2116         }
2117
2118 exit_dma_lch_fail:
2119         kfree(p);
2120         kfree(d);
2121         kfree(dma_chan);
2122         return ret;
2123 }
2124
2125 static int __devexit omap_system_dma_remove(struct platform_device *pdev)
2126 {
2127         int dma_irq;
2128
2129         if (cpu_class_is_omap2()) {
2130                 char irq_name[4];
2131                 strcpy(irq_name, "0");
2132                 dma_irq = platform_get_irq_byname(pdev, irq_name);
2133                 remove_irq(dma_irq, &omap24xx_dma_irq);
2134         } else {
2135                 int irq_rel = 0;
2136                 for ( ; irq_rel < dma_chan_count; irq_rel++) {
2137                         dma_irq = platform_get_irq(pdev, irq_rel);
2138                         free_irq(dma_irq, (void *)(irq_rel + 1));
2139                 }
2140         }
2141         kfree(p);
2142         kfree(d);
2143         kfree(dma_chan);
2144         return 0;
2145 }
2146
2147 static struct platform_driver omap_system_dma_driver = {
2148         .probe          = omap_system_dma_probe,
2149         .remove         = __devexit_p(omap_system_dma_remove),
2150         .driver         = {
2151                 .name   = "omap_dma_system"
2152         },
2153 };
2154
2155 static int __init omap_system_dma_init(void)
2156 {
2157         return platform_driver_register(&omap_system_dma_driver);
2158 }
2159 arch_initcall(omap_system_dma_init);
2160
2161 static void __exit omap_system_dma_exit(void)
2162 {
2163         platform_driver_unregister(&omap_system_dma_driver);
2164 }
2165
2166 MODULE_DESCRIPTION("OMAP SYSTEM DMA DRIVER");
2167 MODULE_LICENSE("GPL");
2168 MODULE_ALIAS("platform:" DRIVER_NAME);
2169 MODULE_AUTHOR("Texas Instruments Inc");
2170
2171 /*
2172  * Reserve OMAP SDMA channels using the cmdline bootarg
2173  * "omap_dma_reserve_ch=". The valid range is 1 to 32.
2174  */
2175 static int __init omap_dma_cmdline_reserve_ch(char *str)
2176 {
2177         if (get_option(&str, &omap_dma_reserve_channels) != 1)
2178                 omap_dma_reserve_channels = 0;
2179         return 1;
2180 }
2181
2182 __setup("omap_dma_reserve_ch=", omap_dma_cmdline_reserve_ch);
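
/*
 * Example (illustrative): booting with "omap_dma_reserve_ch=16" on a
 * platform whose dma_attr sets RESERVE_CHANNEL caps the driver at 16
 * logical channels (see omap_system_dma_probe() above).
 */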
2183
2184