/*
 * EDMA3 support for DaVinci
 *
 * Copyright (C) 2006-2009 Texas Instruments.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>

#include <mach/edma.h>
/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20
/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER		0x00	/* 64 bits */
#define SH_ECR		0x08	/* 64 bits */
#define SH_ESR		0x10	/* 64 bits */
#define SH_CER		0x18	/* 64 bits */
#define SH_EER		0x20	/* 64 bits */
#define SH_EECR		0x28	/* 64 bits */
#define SH_EESR		0x30	/* 64 bits */
#define SH_SER		0x38	/* 64 bits */
#define SH_SECR		0x40	/* 64 bits */
#define SH_IER		0x50	/* 64 bits */
#define SH_IECR		0x58	/* 64 bits */
#define SH_IESR		0x60	/* 64 bits */
#define SH_IPR		0x68	/* 64 bits */
#define SH_ICR		0x70	/* 64 bits */
#define SH_IEVAL	0x78
#define SH_QER		0x80
#define SH_QEER		0x84
#define SH_QEECR	0x88
#define SH_QEESR	0x8c
#define SH_QSER		0x90
#define SH_QSECR	0x94
#define SH_SIZE		0x200
/* Offsets for EDMA CC global registers */
#define EDMA_REV	0x0000
#define EDMA_CCCFG	0x0004
#define EDMA_QCHMAP	0x0200	/* 8 registers */
#define EDMA_DMAQNUM	0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM	0x0260
#define EDMA_QUETCMAP	0x0280
#define EDMA_QUEPRI	0x0284
#define EDMA_EMR	0x0300	/* 64 bits */
#define EDMA_EMCR	0x0308	/* 64 bits */
#define EDMA_QEMR	0x0310
#define EDMA_QEMCR	0x0314
#define EDMA_CCERR	0x0318
#define EDMA_CCERRCLR	0x031c
#define EDMA_EEVAL	0x0320
#define EDMA_DRAE	0x0340	/* 4 x 64 bits */
#define EDMA_QRAE	0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT	0x0600	/* 2 registers */
#define EDMA_QWMTHRA	0x0620
#define EDMA_QWMTHRB	0x0624
#define EDMA_CCSTAT	0x0640

#define EDMA_M		0x1000	/* global channel registers */
#define EDMA_ECR	0x1008
#define EDMA_ECRH	0x100C
#define EDMA_SHADOW0	0x2000	/* 4 regions shadowing global channels */
#define EDMA_PARM	0x4000	/* 128 param entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))

#define EDMA_DCHMAP	0x0100	/* 64 registers */
#define CHMAP_EXIST	BIT(24)

#define EDMA_MAX_DMACH		64
#define EDMA_MAX_PARAMENTRY	512
/*****************************************************************************/

static void __iomem *edmacc_regs_base[EDMA_MAX_CC];
static inline unsigned int edma_read(unsigned ctlr, int offset)
{
	return (unsigned int)__raw_readl(edmacc_regs_base[ctlr] + offset);
}

static inline void edma_write(unsigned ctlr, int offset, int val)
{
	__raw_writel(val, edmacc_regs_base[ctlr] + offset);
}
static inline void edma_modify(unsigned ctlr, int offset, unsigned and,
		unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline void edma_and(unsigned ctlr, int offset, unsigned and)
{
	unsigned val = edma_read(ctlr, offset);
	val &= and;
	edma_write(ctlr, offset, val);
}
static inline void edma_or(unsigned ctlr, int offset, unsigned or)
{
	unsigned val = edma_read(ctlr, offset);
	val |= or;
	edma_write(ctlr, offset, val);
}
static inline unsigned int edma_read_array(unsigned ctlr, int offset, int i)
{
	return edma_read(ctlr, offset + (i << 2));
}
static inline void edma_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, offset + (i << 2), val);
}
static inline void edma_modify_array(unsigned ctlr, int offset, int i,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, offset + (i << 2), and, or);
}
static inline void edma_or_array(unsigned ctlr, int offset, int i, unsigned or)
{
	edma_or(ctlr, offset + (i << 2), or);
}
static inline void edma_or_array2(unsigned ctlr, int offset, int i, int j,
		unsigned or)
{
	edma_or(ctlr, offset + ((i*2 + j) << 2), or);
}
static inline void edma_write_array2(unsigned ctlr, int offset, int i, int j,
		unsigned val)
{
	edma_write(ctlr, offset + ((i*2 + j) << 2), val);
}
static inline unsigned int edma_shadow0_read(unsigned ctlr, int offset)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset);
}
static inline unsigned int edma_shadow0_read_array(unsigned ctlr, int offset,
		int i)
{
	return edma_read(ctlr, EDMA_SHADOW0 + offset + (i << 2));
}
static inline void edma_shadow0_write(unsigned ctlr, int offset, unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset, val);
}
static inline void edma_shadow0_write_array(unsigned ctlr, int offset, int i,
		unsigned val)
{
	edma_write(ctlr, EDMA_SHADOW0 + offset + (i << 2), val);
}
static inline unsigned int edma_parm_read(unsigned ctlr, int offset,
		int param_no)
{
	return edma_read(ctlr, EDMA_PARM + offset + (param_no << 5));
}
static inline void edma_parm_write(unsigned ctlr, int offset, int param_no,
		unsigned val)
{
	edma_write(ctlr, EDMA_PARM + offset + (param_no << 5), val);
}
static inline void edma_parm_modify(unsigned ctlr, int offset, int param_no,
		unsigned and, unsigned or)
{
	edma_modify(ctlr, EDMA_PARM + offset + (param_no << 5), and, or);
}
static inline void edma_parm_and(unsigned ctlr, int offset, int param_no,
		unsigned and)
{
	edma_and(ctlr, EDMA_PARM + offset + (param_no << 5), and);
}
static inline void edma_parm_or(unsigned ctlr, int offset, int param_no,
		unsigned or)
{
	edma_or(ctlr, EDMA_PARM + offset + (param_no << 5), or);
}
static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}
/*****************************************************************************/

/* actual number of DMA channels and slots on this silicon */
struct edma {
	/* how many dma resources of each type */
	unsigned	num_channels;
	unsigned	num_region;
	unsigned	num_slots;
	unsigned	num_tc;
	unsigned	num_cc;
	enum dma_event_q	default_queue;

	/* list of channels with no event trigger; terminated by "-1" */
	const s8	*noevent;

	/* The edma_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	DECLARE_BITMAP(edma_inuse, EDMA_MAX_PARAMENTRY);

	/* The edma_unused bit for each channel is clear unless
	 * it is not being used on this platform. It uses a bit
	 * of SOC-specific initialization code.
	 */
	DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);

	unsigned	irq_res_start;
	unsigned	irq_res_end;

	struct dma_interrupt_data {
		void (*callback)(unsigned channel, unsigned short ch_status,
				void *data);
		void *data;
	} intr_data[EDMA_MAX_DMACH];
};

static struct edma *edma_cc[EDMA_MAX_CC];
static int arch_num_cc;
/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

/*****************************************************************************/
static void map_dmach_queue(unsigned ctlr, unsigned ch_no,
		enum dma_event_q queue_no)
{
	int bit = (ch_no & 0x7) * 4;

	/* default to low priority queue */
	if (queue_no == EVENTQ_DEFAULT)
		queue_no = edma_cc[ctlr]->default_queue;

	queue_no &= 7;
	edma_modify_array(ctlr, EDMA_DMAQNUM, (ch_no >> 3),
			~(0x7 << bit), queue_no << bit);
}

static void __init map_queue_tc(unsigned ctlr, int queue_no, int tc_no)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUETCMAP, ~(0x7 << bit), ((tc_no & 0x7) << bit));
}

static void __init assign_priority_to_queue(unsigned ctlr, int queue_no,
		int priority)
{
	int bit = queue_no * 4;
	edma_modify(ctlr, EDMA_QUEPRI, ~(0x7 << bit),
			((priority & 0x7) << bit));
}
/**
 * map_dmach_param - Maps channel number to param entry number
 *
 * This maps the DMA channel number to a parameter RAM entry number.
 * In other words, using the DMA channel mapping registers, a param
 * entry can be mapped to any channel.
 *
 * Callers are responsible for ensuring the channel mapping logic is
 * included in that particular EDMA variant (e.g. dm646x).
 */
static void __init map_dmach_param(unsigned ctlr)
{
	int i;
	for (i = 0; i < EDMA_MAX_DMACH; i++)
		edma_write_array(ctlr, EDMA_DCHMAP, i, (i << 5));
}
static inline void
setup_dma_interrupt(unsigned lch,
	void (*callback)(unsigned channel, u16 ch_status, void *data),
	void *data)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(lch);
	lch = EDMA_CHAN_SLOT(lch);

	if (!callback)
		edma_shadow0_write_array(ctlr, SH_IECR, lch >> 5,
				BIT(lch & 0x1f));

	edma_cc[ctlr]->intr_data[lch].callback = callback;
	edma_cc[ctlr]->intr_data[lch].data = data;

	if (callback) {
		edma_shadow0_write_array(ctlr, SH_ICR, lch >> 5,
				BIT(lch & 0x1f));
		edma_shadow0_write_array(ctlr, SH_IESR, lch >> 5,
				BIT(lch & 0x1f));
	}
}
static int irq2ctlr(int irq)
{
	if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
		return 0;
	else if (irq >= edma_cc[1]->irq_res_start &&
		irq <= edma_cc[1]->irq_res_end)
		return 1;

	return -1;
}
/******************************************************************************
 *
 * DMA interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_irq_handler\n");

	if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) &&
	    (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0))
		return IRQ_NONE;

	while (1) {
		int j;
		if (edma_shadow0_read_array(ctlr, SH_IPR, 0) &
				edma_shadow0_read_array(ctlr, SH_IER, 0))
			j = 0;
		else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) &
				edma_shadow0_read_array(ctlr, SH_IER, 1))
			j = 1;
		else
			break;
		dev_dbg(data, "IPR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_IPR, j));
		for (i = 0; i < 32; i++) {
			int k = (j << 5) + i;
			if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i))
					&& (edma_shadow0_read_array(ctlr,
							SH_IER, j) & BIT(i))) {
				/* Clear the corresponding IPR bits */
				edma_shadow0_write_array(ctlr, SH_ICR, j,
							BIT(i));
				if (edma_cc[ctlr]->intr_data[k].callback)
					edma_cc[ctlr]->intr_data[k].callback(
						k, DMA_COMPLETE,
						edma_cc[ctlr]->intr_data[k].
							data);
			}
		}
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_shadow0_write(ctlr, SH_IEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * DMA error interrupt handler
 *
 *****************************************************************************/
static irqreturn_t dma_ccerr_handler(int irq, void *data)
{
	int i;
	unsigned ctlr;
	unsigned int cnt = 0;

	ctlr = irq2ctlr(irq);

	dev_dbg(data, "dma_ccerr_handler\n");

	if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
	    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
	    (edma_read(ctlr, EDMA_QEMR) == 0) &&
	    (edma_read(ctlr, EDMA_CCERR) == 0))
		return IRQ_NONE;

	while (1) {
		int j = -1;
		if (edma_read_array(ctlr, EDMA_EMR, 0))
			j = 0;
		else if (edma_read_array(ctlr, EDMA_EMR, 1))
			j = 1;
		if (j >= 0) {
			dev_dbg(data, "EMR%d %08x\n", j,
					edma_read_array(ctlr, EDMA_EMR, j));
			for (i = 0; i < 32; i++) {
				int k = (j << 5) + i;
				if (edma_read_array(ctlr, EDMA_EMR, j) &
							BIT(i)) {
					/* Clear the corresponding EMR bits */
					edma_write_array(ctlr, EDMA_EMCR, j,
							BIT(i));
					/* Clear any SER */
					edma_shadow0_write_array(ctlr, SH_SECR,
								j, BIT(i));
					if (edma_cc[ctlr]->intr_data[k].
								callback)
						edma_cc[ctlr]->intr_data[k].
							callback(k,
							DMA_CC_ERROR,
							edma_cc[ctlr]->intr_data
								[k].data);
				}
			}
		} else if (edma_read(ctlr, EDMA_QEMR)) {
			dev_dbg(data, "QEMR %02x\n",
				edma_read(ctlr, EDMA_QEMR));
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_QEMCR, BIT(i));
					edma_shadow0_write(ctlr, SH_QSECR,
								BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		} else if (edma_read(ctlr, EDMA_CCERR)) {
			dev_dbg(data, "CCERR %08x\n",
				edma_read(ctlr, EDMA_CCERR));
			/* FIXME:  CCERR.BIT(16) ignored!  much better
			 * to just write CCERRCLR with CCERR value...
			 */
			for (i = 0; i < 8; i++) {
				if (edma_read(ctlr, EDMA_CCERR) & BIT(i)) {
					/* Clear the corresponding IPR bits */
					edma_write(ctlr, EDMA_CCERRCLR, BIT(i));

					/* NOTE:  not reported!! */
				}
			}
		}
		if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
		    (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
		    (edma_read(ctlr, EDMA_QEMR) == 0) &&
		    (edma_read(ctlr, EDMA_CCERR) == 0))
			break;
		cnt++;
		if (cnt > 10)
			break;
	}
	edma_write(ctlr, EDMA_EEVAL, 1);
	return IRQ_HANDLED;
}
/******************************************************************************
 *
 * Transfer controller error interrupt handlers
 *
 *****************************************************************************/

#define tc_errs_handled	false	/* disabled as long as they're NOPs */

static irqreturn_t dma_tc0err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc0err_handler\n");
	return IRQ_HANDLED;
}

static irqreturn_t dma_tc1err_handler(int irq, void *data)
{
	dev_dbg(data, "dma_tc1err_handler\n");
	return IRQ_HANDLED;
}
static int reserve_contiguous_slots(int ctlr, unsigned int id,
				     unsigned int num_slots,
				     unsigned int start_slot)
{
	int i, j;
	unsigned int count = num_slots;
	int stop_slot = start_slot;
	DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);

	for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
		j = EDMA_CHAN_SLOT(i);
		if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
			/* Record our current beginning slot */
			if (count == num_slots)
				stop_slot = i;

			count--;
			set_bit(j, tmp_inuse);

			if (count == 0)
				break;
		} else {
			clear_bit(j, tmp_inuse);

			if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
				stop_slot = i;
				break;
			} else {
				count = num_slots;
			}
		}
	}

	/*
	 * We have to clear any bits that we set
	 * if we run out of parameter RAM slots, i.e. we do find a set
	 * of contiguous parameter RAM slots but do not find the exact number
	 * requested as we may reach the total number of parameter RAM slots
	 */
	if (i == edma_cc[ctlr]->num_slots)
		stop_slot = i;

	for (j = start_slot; j < stop_slot; j++)
		if (test_bit(j, tmp_inuse))
			clear_bit(j, edma_cc[ctlr]->edma_inuse);

	if (count)
		return -EBUSY;

	for (j = i - num_slots + 1; j <= i; ++j)
		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
}
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	int i, ctlr;

	for (i = 0; i < pdev->num_resources; i++) {
		if ((pdev->resource[i].flags & IORESOURCE_DMA) &&
				(int)pdev->resource[i].start >= 0) {
			ctlr = EDMA_CTLR(pdev->resource[i].start);
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
					edma_cc[ctlr]->edma_unused);
		}
	}

	return 0;
}
/*-----------------------------------------------------------------------*/

static bool unused_chan_list_done;

/* Resource alloc/free:  dma channels, parameter RAM slots */
/**
 * edma_alloc_channel - allocate DMA channel and paired parameter RAM
 * @channel: specific channel to allocate; negative for "any unmapped channel"
 * @callback: optional; to be issued on DMA completion or errors
 * @data: passed to callback
 * @eventq_no: an EVENTQ_* constant, used to choose which Transfer
 *	Controller (TC) executes requests using this channel.  Use
 *	EVENTQ_DEFAULT unless you really need a high priority queue.
 *
 * This allocates a DMA channel and its associated parameter RAM slot.
 * The parameter RAM is initialized to hold a dummy transfer.
 *
 * Normal use is to pass a specific channel number as @channel, to make
 * use of hardware events mapped to that channel.  When the channel will
 * be used only for software triggering or event chaining, channels not
 * mapped to hardware events (or mapped to unused events) are preferable.
 *
 * DMA transfers start from a channel using edma_start(), or by
 * chaining.  When the transfer described in that channel's parameter RAM
 * slot completes, that slot's data may be reloaded through a link.
 *
 * DMA errors are only reported to the @callback associated with the
 * channel driving that transfer, but transfer completion callbacks can
 * be sent to another channel under control of the TCC field in
 * the option word of the transfer's parameter RAM set.  Drivers must not
 * use DMA transfer completion callbacks for channels they did not allocate.
 * (The same applies to TCC codes used in transfer chaining.)
 *
 * Returns the number of the channel, else negative errno.
 */
int edma_alloc_channel(int channel,
		void (*callback)(unsigned channel, u16 ch_status, void *data),
		void *data,
		enum dma_event_q eventq_no)
{
	unsigned i, done = 0, ctlr = 0;
	int ret = 0;

	if (!unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		ret = bus_for_each_dev(&platform_bus_type, NULL, NULL,
				prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		unused_chan_list_done = true;
	}

	if (channel >= 0) {
		ctlr = EDMA_CTLR(channel);
		channel = EDMA_CHAN_SLOT(channel);
	}

	if (channel < 0) {
		for (i = 0; i < arch_num_cc; i++) {
			channel = 0;
			for (;;) {
				channel = find_next_bit(edma_cc[i]->edma_unused,
						edma_cc[i]->num_channels,
						channel);
				if (channel == edma_cc[i]->num_channels)
					break;
				if (!test_and_set_bit(channel,
						edma_cc[i]->edma_inuse)) {
					done = 1;
					ctlr = i;
					break;
				}
				channel++;
			}
			if (done)
				break;
		}
		if (!done)
			return -ENOMEM;
	} else if (channel >= edma_cc[ctlr]->num_channels) {
		return -EINVAL;
	} else if (test_and_set_bit(channel, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ctlr, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(EDMA_CTLR_CHAN(ctlr, channel));
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);

	if (callback)
		setup_dma_interrupt(EDMA_CTLR_CHAN(ctlr, channel),
					callback, data);

	map_dmach_queue(ctlr, channel, eventq_no);

	return EDMA_CTLR_CHAN(ctlr, channel);
}
EXPORT_SYMBOL(edma_alloc_channel);
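
/*
 * Illustrative usage sketch (not part of this driver): a hypothetical
 * client might allocate the channel wired to its hardware event and
 * register a completion callback.  The callback name "my_dma_cb" and
 * channel number 12 are assumptions made only for this example.
 *
 *	static void my_dma_cb(unsigned channel, u16 ch_status, void *data)
 *	{
 *		if (ch_status != DMA_COMPLETE)
 *			pr_err("EDMA error on channel %u\n", channel);
 *	}
 *
 *	int ch = edma_alloc_channel(12, my_dma_cb, NULL, EVENTQ_DEFAULT);
 *	if (ch < 0)
 *		return ch;
 *	... configure the channel's PaRAM slot, then ...
 *	edma_start(ch);
 */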
/**
 * edma_free_channel - deallocate DMA channel
 * @channel: dma channel returned from edma_alloc_channel()
 *
 * This deallocates the DMA channel and associated parameter RAM slot
 * allocated by edma_alloc_channel().
 *
 * Callers are responsible for ensuring the channel is inactive, and
 * will not be reactivated by linking, chaining, or software calls to
 * edma_start().
 */
void edma_free_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;

	setup_dma_interrupt(channel, NULL, NULL);
	/* REVISIT should probably take out of shadow region 0 */

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(channel),
			&dummy_paramset, PARM_SIZE);
	clear_bit(channel, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_channel);
/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ctlr: controller on which the slot is allocated
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer.  Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
int edma_alloc_slot(unsigned ctlr, int slot)
{
	if (slot >= 0)
		slot = EDMA_CHAN_SLOT(slot);

	if (slot < 0) {
		slot = edma_cc[ctlr]->num_channels;
		for (;;) {
			slot = find_next_zero_bit(edma_cc[ctlr]->edma_inuse,
					edma_cc[ctlr]->num_slots, slot);
			if (slot == edma_cc[ctlr]->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse))
				break;
		}
	} else if (slot < edma_cc[ctlr]->num_channels ||
			slot >= edma_cc[ctlr]->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, edma_cc[ctlr]->edma_inuse)) {
		return -EBUSY;
	}

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);

	return EDMA_CTLR_CHAN(ctlr, slot);
}
EXPORT_SYMBOL(edma_alloc_slot);
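
/*
 * Illustrative sketch (not part of this driver): link slots are usually
 * requested with EDMA_SLOT_ANY; the controller number 0 used below is an
 * assumption made only for this example.
 *
 *	int slot = edma_alloc_slot(0, EDMA_SLOT_ANY);
 *	if (slot < 0)
 *		return slot;
 *	... program the slot, then edma_link() a channel's slot to it ...
 */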
/**
 * edma_free_slot - deallocate DMA parameter RAM
 * @slot: parameter RAM slot returned from edma_alloc_slot()
 *
 * This deallocates the parameter RAM slot allocated by edma_alloc_slot().
 * Callers are responsible for ensuring the slot is inactive, and will
 * not be activated.
 */
void edma_free_slot(unsigned slot)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots)
		return;

	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			&dummy_paramset, PARM_SIZE);
	clear_bit(slot, edma_cc[ctlr]->edma_inuse);
}
EXPORT_SYMBOL(edma_free_slot);
/**
 * edma_alloc_cont_slots - allocate contiguous parameter RAM slots
 * @ctlr: controller on which the slots are requested
 * @id: can only be EDMA_CONT_PARAMS_ANY, EDMA_CONT_PARAMS_FIXED_EXACT
 *	or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @slot: the start value of the Parameter RAM slot range that should be
 *	passed if @id is EDMA_CONT_PARAMS_FIXED_EXACT or
 *	EDMA_CONT_PARAMS_FIXED_NOT_EXACT
 * @count: number of contiguous Parameter RAM slots
 *
 * The API returns the starting point of the set of contiguous parameter
 * RAM slots that have been requested.
 *
 * If @id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
 * contiguous Parameter RAM slots from parameter RAM 64 in the case of
 * DaVinci SOCs and 32 in the case of DA8xx SOCs.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
 * set of contiguous parameter RAM slots from the @slot that is passed as an
 * argument to the API.
 *
 * If @id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
 * to find a set of contiguous parameter RAM slots from the @slot that is
 * passed as an argument to the API.  On failure the API will try to find
 * a set of contiguous Parameter RAM slots from the remaining Parameter
 * RAM slots.
 */
int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
{
	/*
	 * The start slot requested should be greater than
	 * the number of channels and lesser than the total number
	 * of slots.
	 */
	if ((id != EDMA_CONT_PARAMS_ANY) &&
		(slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots))
		return -EINVAL;

	/*
	 * The number of parameter RAM slots requested cannot be less than 1
	 * and cannot be more than the number of slots minus the number of
	 * channels.
	 */
	if (count < 1 || count >
		(edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
		return -EINVAL;

	switch (id) {
	case EDMA_CONT_PARAMS_ANY:
		return reserve_contiguous_slots(ctlr, id, count,
						 edma_cc[ctlr]->num_channels);
	case EDMA_CONT_PARAMS_FIXED_EXACT:
	case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
		return reserve_contiguous_slots(ctlr, id, count, slot);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL(edma_alloc_cont_slots);
/**
 * edma_free_cont_slots - deallocate DMA parameter RAM slots
 * @slot: first parameter RAM of a set of parameter RAM slots to be freed
 * @count: the number of contiguous parameter RAM slots to be freed
 *
 * This deallocates the parameter RAM slots allocated by
 * edma_alloc_cont_slots().
 * Callers/applications need to keep track of sets of contiguous
 * parameter RAM slots that have been allocated using the
 * edma_alloc_cont_slots() API.
 * Callers are responsible for ensuring the slots are inactive, and will
 * not be activated.
 */
int edma_free_cont_slots(unsigned slot, int count)
{
	unsigned ctlr, slot_to_free;
	int i;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_channels ||
		slot >= edma_cc[ctlr]->num_slots ||
		count < 1)
		return -EINVAL;

	for (i = slot; i < slot + count; ++i) {
		ctlr = EDMA_CTLR(i);
		slot_to_free = EDMA_CHAN_SLOT(i);

		memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
			&dummy_paramset, PARM_SIZE);
		clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
	}

	return 0;
}
EXPORT_SYMBOL(edma_free_cont_slots);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (i) -- read/write partial slots */
/**
 * edma_set_src - set initial DMA source address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @src_port: physical address of source (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the source address is modified during the DMA transfer
 * according to edma_set_src_index().
 */
void edma_set_src(unsigned slot, dma_addr_t src_port,
				enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set SAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
		} else {
			/* clear SAM */
			i &= ~SAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);

		/* set the source port address
		   in source register of param structure */
		edma_parm_write(ctlr, PARM_SRC, slot, src_port);
	}
}
EXPORT_SYMBOL(edma_set_src);
/**
 * edma_set_dest - set initial DMA destination address in parameter RAM slot
 * @slot: parameter RAM slot being configured
 * @dest_port: physical address of destination (memory, controller FIFO, etc)
 * @mode: INCR, except in very rare cases
 * @width: ignored unless @mode is FIFO, else specifies the
 *	width to use when addressing the fifo (e.g. W8BIT, W32BIT)
 *
 * Note that the destination address is modified during the DMA transfer
 * according to edma_set_dest_index().
 */
void edma_set_dest(unsigned slot, dma_addr_t dest_port,
				 enum address_mode mode, enum fifo_width width)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);

		if (mode) {
			/* set DAM and program FWID */
			i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
		} else {
			/* clear DAM */
			i &= ~DAM;
		}
		edma_parm_write(ctlr, PARM_OPT, slot, i);
		/* set the destination port address
		   in dest register of param structure */
		edma_parm_write(ctlr, PARM_DST, slot, dest_port);
	}
}
EXPORT_SYMBOL(edma_set_dest);
/**
 * edma_get_position - returns the current transfer points
 * @slot: parameter RAM slot being examined
 * @src: pointer to source port position
 * @dst: pointer to destination port position
 *
 * Returns current source and destination addresses for a particular
 * parameter RAM slot.  Its channel should not be active when this is called.
 */
void edma_get_position(unsigned slot, dma_addr_t *src, dma_addr_t *dst)
{
	struct edmacc_param temp;
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	edma_read_slot(EDMA_CTLR_CHAN(ctlr, slot), &temp);
	if (src != NULL)
		*src = temp.src;
	if (dst != NULL)
		*dst = temp.dst;
}
EXPORT_SYMBOL(edma_get_position);
/**
 * edma_set_src_index - configure DMA source address indexing
 * @slot: parameter RAM slot being configured
 * @src_bidx: byte offset between source arrays in a frame
 * @src_cidx: byte offset between source frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0xffff0000, src_bidx);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0xffff0000, src_cidx);
	}
}
EXPORT_SYMBOL(edma_set_src_index);
/**
 * edma_set_dest_index - configure DMA destination address indexing
 * @slot: parameter RAM slot being configured
 * @dest_bidx: byte offset between destination arrays in a frame
 * @dest_cidx: byte offset between destination frames in a block
 *
 * Offsets are specified to support either contiguous or discontiguous
 * memory transfers, or repeated access to a hardware register, as needed.
 * When accessing hardware registers, both offsets are normally zero.
 */
void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
				0x0000ffff, dest_bidx << 16);
		edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
				0x0000ffff, dest_cidx << 16);
	}
}
EXPORT_SYMBOL(edma_set_dest_index);
/**
 * edma_set_transfer_params - configure DMA transfer parameters
 * @slot: parameter RAM slot being configured
 * @acnt: how many bytes per array (at least one)
 * @bcnt: how many arrays per frame (at least one)
 * @ccnt: how many frames per block (at least one)
 * @bcnt_rld: used only for A-Synchronized transfers; this specifies
 *	the value to reload into bcnt when it decrements to zero
 * @sync_mode: ASYNC or ABSYNC
 *
 * See the EDMA3 documentation to understand how to configure and link
 * transfers using the fields in PaRAM slots.  If you are not doing it
 * all at once with edma_write_slot(), you will use this routine
 * plus two calls each for source and destination, setting the initial
 * address and saying how to index that address.
 *
 * An example of an A-Synchronized transfer is a serial link using a
 * single word shift register.  In that case, @acnt would be equal to
 * that word size; the serial controller issues a DMA synchronization
 * event to transfer each word, and memory access by the DMA transfer
 * controller will be word-at-a-time.
 *
 * An example of an AB-Synchronized transfer is a device using a FIFO.
 * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
 * The controller with the FIFO issues DMA synchronization events when
 * the FIFO threshold is reached, and the DMA transfer controller will
 * transfer one frame to (or from) the FIFO.  It will probably use
 * efficient burst modes to access memory.
 */
void edma_set_transfer_params(unsigned slot,
		u16 acnt, u16 bcnt, u16 ccnt,
		u16 bcnt_rld, enum sync_dimension sync_mode)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot < edma_cc[ctlr]->num_slots) {
		edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
				0x0000ffff, bcnt_rld << 16);
		if (sync_mode == ASYNC)
			edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
		else
			edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
		/* Set the acount, bcount, ccount registers */
		edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
		edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
	}
}
EXPORT_SYMBOL(edma_set_transfer_params);
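
/*
 * Illustrative sketch of the AB-synchronized FIFO case described above
 * (not part of this driver).  Assumptions for the example: a 4-byte wide,
 * 64-entry deep device FIFO at "fifo_phys", 4096 bytes read from a
 * DMA-mapped buffer at "buf_phys", on a previously allocated channel "ch".
 * The source walks through memory (bidx = acnt, cidx = acnt * bcnt) while
 * the FIFO address stays fixed.
 *
 *	edma_set_src(ch, buf_phys, INCR, W8BIT);
 *	edma_set_dest(ch, fifo_phys, FIFO, W32BIT);
 *	edma_set_src_index(ch, 4, 256);
 *	edma_set_dest_index(ch, 0, 0);
 *	edma_set_transfer_params(ch, 4, 64, 16, 64, ABSYNC);
 */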
/**
 * edma_link - link one parameter RAM slot to another
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
void edma_link(unsigned from, unsigned to)
{
	unsigned ctlr_from, ctlr_to;

	ctlr_from = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);
	ctlr_to = EDMA_CTLR(to);
	to = EDMA_CHAN_SLOT(to);

	if (from >= edma_cc[ctlr_from]->num_slots)
		return;
	if (to >= edma_cc[ctlr_to]->num_slots)
		return;
	edma_parm_modify(ctlr_from, PARM_LINK_BCNTRLD, from, 0xffff0000,
				PARM_OFFSET(to));
}
EXPORT_SYMBOL(edma_link);
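
/*
 * Illustrative sketch (not part of this driver): a classic ping-pong
 * setup reloads a channel's slot from two link slots so buffers alternate
 * without CPU intervention.  "ch", "slot_a" and "slot_b" are assumed to
 * have been allocated and programmed with identical transfer parameters
 * but different buffer addresses; the names are assumptions made only
 * for this example.
 *
 *	edma_link(ch, slot_a);
 *	edma_link(slot_a, slot_b);
 *	edma_link(slot_b, slot_a);
 */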
/**
 * edma_unlink - cut link from one parameter RAM slot
 * @from: parameter RAM slot originating the link
 *
 * The originating slot should not be part of any active DMA transfer.
 * Its link is set to 0xffff.
 */
void edma_unlink(unsigned from)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(from);
	from = EDMA_CHAN_SLOT(from);

	if (from >= edma_cc[ctlr]->num_slots)
		return;
	edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
}
EXPORT_SYMBOL(edma_unlink);
/*-----------------------------------------------------------------------*/

/* Parameter RAM operations (ii) -- read/write whole parameter sets */
/**
 * edma_write_slot - write parameter RAM data for slot
 * @slot: number of parameter RAM slot being modified
 * @param: data to be written into parameter RAM slot
 *
 * Use this to assign all parameters of a transfer at once.  This
 * allows more efficient setup of transfers than issuing multiple
 * calls to set up those parameters in small pieces, and provides
 * complete control over all transfer options.
 */
void edma_write_slot(unsigned slot, const struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot), param,
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_write_slot);
/**
 * edma_read_slot - read parameter RAM data from slot
 * @slot: number of parameter RAM slot being copied
 * @param: where to store copy of parameter RAM data
 *
 * Use this to read data from a parameter RAM slot, perhaps to
 * save them as a template for later reuse.
 */
void edma_read_slot(unsigned slot, struct edmacc_param *param)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(slot);
	slot = EDMA_CHAN_SLOT(slot);

	if (slot >= edma_cc[ctlr]->num_slots)
		return;
	memcpy_fromio(param, edmacc_regs_base[ctlr] + PARM_OFFSET(slot),
			PARM_SIZE);
}
EXPORT_SYMBOL(edma_read_slot);
/*-----------------------------------------------------------------------*/

/* Various EDMA channel control operations */
/**
 * edma_pause - pause dma on a channel
 * @channel: on which edma_start() has been called
 *
 * This temporarily disables EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers on its behalf.
 */
void edma_pause(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_pause);
/**
 * edma_resume - resumes dma on a paused channel
 * @channel: on which edma_pause() has been called
 *
 * This re-enables EDMA hardware events on the specified channel.
 */
void edma_resume(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EESR, channel >> 5, mask);
	}
}
EXPORT_SYMBOL(edma_resume);
/**
 * edma_start - start dma on a channel
 * @channel: channel being activated
 *
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software.  (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 *
 * Returns zero on success, else negative errno.
 */
int edma_start(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		/* EDMA channels without event association */
		if (test_bit(channel, edma_cc[ctlr]->edma_unused)) {
			pr_debug("EDMA: ESR%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_ESR, j));
			edma_shadow0_write_array(ctlr, SH_ESR, j, mask);
			return 0;
		}

		/* EDMA channel with event association */
		pr_debug("EDMA: ER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ctlr, EDMA_ECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_EESR, j, mask);
		pr_debug("EDMA: EER%d %08x\n", j,
			edma_shadow0_read_array(ctlr, SH_EER, j));
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(edma_start);
/**
 * edma_stop - stops dma on the channel passed
 * @channel: channel being deactivated
 *
 * When @channel is a channel, any active transfer is paused and
 * all pending hardware events are cleared.  The current transfer
 * may not be resumed, and the channel's Parameter RAM should be
 * reinitialized before being reused.
 */
void edma_stop(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = channel >> 5;
		unsigned int mask = BIT(channel & 0x1f);

		edma_shadow0_write_array(ctlr, SH_EECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write_array(ctlr, EDMA_EMCR, j, mask);

		pr_debug("EDMA: EER%d %08x\n", j,
				edma_shadow0_read_array(ctlr, SH_EER, j));

		/* REVISIT:  consider guarding against inappropriate event
		 * chaining by overwriting with dummy_paramset.
		 */
	}
}
EXPORT_SYMBOL(edma_stop);
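
/*
 * Illustrative teardown order (not part of this driver): callers
 * typically stop the channel before releasing any link slots and the
 * channel itself, matching the "inactive before free" requirement
 * documented above.  "ch" and "slot_a" are names assumed only for this
 * example.
 *
 *	edma_stop(ch);
 *	edma_unlink(ch);
 *	edma_free_slot(slot_a);
 *	edma_free_channel(ch);
 */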
/******************************************************************************
 *
 * edma_clean_channel - cleans the PaRAM entry and brings EDMA back to its
 * initial state if the media has been removed before EDMA has finished.
 * It is useful for removable media.
 * Arguments:
 *	channel - channel number
 *
 * Return: zero on success, or corresponding error number on failure
 *
 * FIXME this should not be needed ... edma_stop() should suffice.
 *
 *****************************************************************************/
void edma_clean_channel(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel < edma_cc[ctlr]->num_channels) {
		int j = (channel >> 5);
		unsigned int mask = BIT(channel & 0x1f);

		pr_debug("EDMA: EMR%d %08x\n", j,
				edma_read_array(ctlr, EDMA_EMR, j));
		edma_shadow0_write_array(ctlr, SH_ECR, j, mask);
		/* Clear the corresponding EMR bits */
		edma_write_array(ctlr, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ctlr, SH_SECR, j, mask);
		edma_write(ctlr, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
	}
}
EXPORT_SYMBOL(edma_clean_channel);
/*
 * edma_clear_event - clear an outstanding event on the DMA channel
 *
 * channel - channel number
 */
void edma_clear_event(unsigned channel)
{
	unsigned ctlr;

	ctlr = EDMA_CTLR(channel);
	channel = EDMA_CHAN_SLOT(channel);

	if (channel >= edma_cc[ctlr]->num_channels)
		return;
	if (channel < 32)
		edma_write(ctlr, EDMA_ECR, BIT(channel));
	else
		edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
}
EXPORT_SYMBOL(edma_clear_event);

/*-----------------------------------------------------------------------*/
static int __init edma_probe(struct platform_device *pdev)
{
	struct edma_soc_info	**info = pdev->dev.platform_data;
	const s8		(*queue_priority_mapping)[2];
	const s8		(*queue_tc_mapping)[2];
	int			i, j, off, ln, found = 0;
	int			status = -1;
	const s16		(*rsv_chans)[2];
	const s16		(*rsv_slots)[2];
	int			irq[EDMA_MAX_CC] = {0, 0};
	int			err_irq[EDMA_MAX_CC] = {0, 0};
	struct resource		*r[EDMA_MAX_CC] = {NULL};
	resource_size_t		len[EDMA_MAX_CC];
	char			res_name[10];
	char			irq_name[10];

	if (!info)
		return -ENODEV;

	for (j = 0; j < EDMA_MAX_CC; j++) {
		sprintf(res_name, "edma_cc%d", j);
		r[j] = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						res_name);
		if (!r[j] || !info[j]) {
			if (found)
				break;
			else
				return -ENODEV;
		} else {
			found = 1;
		}

		len[j] = resource_size(r[j]);

		r[j] = request_mem_region(r[j]->start, len[j],
			dev_name(&pdev->dev));
		if (!r[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edmacc_regs_base[j] = ioremap(r[j]->start, len[j]);
		if (!edmacc_regs_base[j]) {
			status = -EBUSY;
			goto fail1;
		}

		edma_cc[j] = kmalloc(sizeof(struct edma), GFP_KERNEL);
		if (!edma_cc[j]) {
			status = -ENOMEM;
			goto fail1;
		}
		memset(edma_cc[j], 0, sizeof(struct edma));

		edma_cc[j]->num_channels = min_t(unsigned, info[j]->n_channel,
							EDMA_MAX_DMACH);
		edma_cc[j]->num_slots = min_t(unsigned, info[j]->n_slot,
							EDMA_MAX_PARAMENTRY);
		edma_cc[j]->num_cc = min_t(unsigned, info[j]->n_cc,
							EDMA_MAX_CC);

		edma_cc[j]->default_queue = info[j]->default_queue;
		if (!edma_cc[j]->default_queue)
			edma_cc[j]->default_queue = EVENTQ_1;

		dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
			edmacc_regs_base[j]);

		for (i = 0; i < edma_cc[j]->num_slots; i++)
			memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
					&dummy_paramset, PARM_SIZE);

		/* Mark all channels as unused */
		memset(edma_cc[j]->edma_unused, 0xff,
			sizeof(edma_cc[j]->edma_unused));

		if (info[j]->rsv) {

			/* Clear the reserved channels in unused list */
			rsv_chans = info[j]->rsv->rsv_chans;
			if (rsv_chans) {
				for (i = 0; rsv_chans[i][0] != -1; i++) {
					off = rsv_chans[i][0];
					ln = rsv_chans[i][1];
					clear_bits(off, ln,
						edma_cc[j]->edma_unused);
				}
			}

			/* Set the reserved slots in inuse list */
			rsv_slots = info[j]->rsv->rsv_slots;
			if (rsv_slots) {
				for (i = 0; rsv_slots[i][0] != -1; i++) {
					off = rsv_slots[i][0];
					ln = rsv_slots[i][1];
					set_bits(off, ln,
						edma_cc[j]->edma_inuse);
				}
			}
		}

		sprintf(irq_name, "edma%d", j);
		irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_start = irq[j];
		status = request_irq(irq[j], dma_irq_handler, 0, "edma",
					&pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				irq[j], status);
			goto fail;
		}

		sprintf(irq_name, "edma%d_err", j);
		err_irq[j] = platform_get_irq_byname(pdev, irq_name);
		edma_cc[j]->irq_res_end = err_irq[j];
		status = request_irq(err_irq[j], dma_ccerr_handler, 0,
					"edma_error", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				err_irq[j], status);
			goto fail;
		}

		/* Everything lives on transfer controller 1 until otherwise
		 * specified. This way, long transfers on the low priority queue
		 * started by the codec engine will not cause audio defects.
		 */
		for (i = 0; i < edma_cc[j]->num_channels; i++)
			map_dmach_queue(j, i, EVENTQ_1);

		queue_tc_mapping = info[j]->queue_tc_mapping;
		queue_priority_mapping = info[j]->queue_priority_mapping;

		/* Event queue to TC mapping */
		for (i = 0; queue_tc_mapping[i][0] != -1; i++)
			map_queue_tc(j, queue_tc_mapping[i][0],
					queue_tc_mapping[i][1]);

		/* Event queue priority mapping */
		for (i = 0; queue_priority_mapping[i][0] != -1; i++)
			assign_priority_to_queue(j,
						queue_priority_mapping[i][0],
						queue_priority_mapping[i][1]);

		/* Map the channel to param entry if channel mapping logic
		 * exists
		 */
		if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
			map_dmach_param(j);

		for (i = 0; i < info[j]->n_region; i++) {
			edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
			edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
			edma_write_array(j, EDMA_QRAE, i, 0x0);
		}
		arch_num_cc++;
	}

	if (tc_errs_handled) {
		status = request_irq(IRQ_TCERRINT0, dma_tc0err_handler, 0,
					"edma_tc0", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d failed --> %d\n",
				IRQ_TCERRINT0, status);
			return status;
		}
		status = request_irq(IRQ_TCERRINT, dma_tc1err_handler, 0,
					"edma_tc1", &pdev->dev);
		if (status < 0) {
			dev_dbg(&pdev->dev, "request_irq %d --> %d\n",
				IRQ_TCERRINT, status);
			return status;
		}
	}

	return 0;

fail:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (err_irq[i])
			free_irq(err_irq[i], &pdev->dev);
		if (irq[i])
			free_irq(irq[i], &pdev->dev);
	}
fail1:
	for (i = 0; i < EDMA_MAX_CC; i++) {
		if (r[i])
			release_mem_region(r[i]->start, len[i]);
		if (edmacc_regs_base[i])
			iounmap(edmacc_regs_base[i]);
		kfree(edma_cc[i]);
	}
	return status;
}
static struct platform_driver edma_driver = {
	.driver.name	= "edma",
};

static int __init edma_init(void)
{
	return platform_driver_probe(&edma_driver, edma_probe);
}
arch_initcall(edma_init);