/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
#define APB_DMA_GEN			0x000
#define GEN_ENABLE			(1<<31)

#define APB_DMA_CNTRL			0x010

#define APB_DMA_IRQ_MASK		0x01c

#define APB_DMA_IRQ_MASK_SET		0x020

#define APB_DMA_CHAN_CSR		0x000
#define CSR_ENB				(1<<31)
#define CSR_IE_EOC			(1<<30)
#define CSR_HOLD			(1<<29)
#define CSR_DIR				(1<<28)
#define CSR_ONCE			(1<<27)
#define CSR_FLOW			(1<<21)
#define CSR_REQ_SEL_SHIFT		16
#define CSR_REQ_SEL_MASK		(0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT		2
#define CSR_WCOUNT_MASK			0xFFFC

#define APB_DMA_CHAN_STA		0x004
#define STA_BUSY			(1<<31)
#define STA_ISE_EOC			(1<<30)
#define STA_HALT			(1<<29)
#define STA_PING_PONG			(1<<28)
#define STA_COUNT_SHIFT			2
#define STA_COUNT_MASK			0xFFFC

#define APB_DMA_CHAN_AHB_PTR		0x010

#define APB_DMA_CHAN_AHB_SEQ		0x014
#define AHB_SEQ_INTR_ENB		(1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT		28
#define AHB_SEQ_BUS_WIDTH_MASK		(0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8		(0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16		(1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32		(2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64		(3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128		(4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP		(1<<27)
#define AHB_SEQ_BURST_MASK		(0x7<<24)
#define AHB_SEQ_BURST_1			(4<<24)
#define AHB_SEQ_BURST_4			(5<<24)
#define AHB_SEQ_BURST_8			(6<<24)
#define AHB_SEQ_DBL_BUF			(1<<19)
#define AHB_SEQ_WRAP_SHIFT		16
#define AHB_SEQ_WRAP_MASK		(0x7<<AHB_SEQ_WRAP_SHIFT)

#define APB_DMA_CHAN_APB_PTR		0x018

#define APB_DMA_CHAN_APB_SEQ		0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT		28
#define APB_SEQ_BUS_WIDTH_MASK		(0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8		(0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16		(1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32		(2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64		(3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128		(4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP		(1<<27)
#define APB_SEQ_WRAP_SHIFT		16
#define APB_SEQ_WRAP_MASK		(0x7<<APB_SEQ_WRAP_SHIFT)
#define TEGRA_SYSTEM_DMA_CH_NR		16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM	4
#define TEGRA_SYSTEM_DMA_CH_MIN		0
#define TEGRA_SYSTEM_DMA_CH_MAX	\
	(TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
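
/*
 * The last TEGRA_SYSTEM_DMA_AVP_CH_NUM channels are presumably reserved
 * for the AVP coprocessor; this driver only manages channels
 * TEGRA_SYSTEM_DMA_CH_MIN..TEGRA_SYSTEM_DMA_CH_MAX (0..11 with the
 * values above), as enforced in tegra_dma_init() below.
 */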
static const unsigned int ahb_addr_wrap_table[8] = {
	0, 32, 64, 128, 256, 512, 1024, 2048
};

static const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

static const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
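
/*
 * Note: it is the table *index*, not the table value, that is programmed
 * into the AHB/APB_SEQ wrap and bus-width fields; e.g. a 32-bit bus width
 * is index 2 in bus_width_table, matching AHB_SEQ_BUS_WIDTH_32 above.
 */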
#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
	struct list_head	list;
	int			id;
	spinlock_t		lock;
	char			name[TEGRA_DMA_NAME_SIZE];
	void __iomem		*addr;
	int			mode;
	int			irq;
	int			req_transfer_count;
};

#define NV_DMA_MAX_CHANNELS 32
static DEFINE_MUTEX(tegra_dma_lock);
static DEFINE_SPINLOCK(enable_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];

static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
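
/*
 * Illustrative client usage, assuming the tegra_dma_req fields declared
 * in <mach/dma.h>; this is a sketch with placeholder names, not part of
 * the driver:
 *
 *	struct tegra_dma_channel *ch;
 *	struct tegra_dma_req req = { 0 };
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.source_addr = src_phys;
 *	req.dest_addr = dst_phys;
 *	req.size = len;
 *	req.to_memory = 1;
 *	req.complete = my_complete_cb;
 *	(plus req_sel, wrap and bus-width fields as required)
 *	tegra_dma_enqueue_req(ch, &req);
 *	...
 *	tegra_dma_free_channel(ch);
 */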
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;

	if (tegra_dma_is_empty(ch))
		return;

	req = list_entry(ch->list.next, typeof(*req), node);

	tegra_dma_dequeue_req(ch, req);
}
void tegra_dma_stop(struct tegra_dma_channel *ch)
{
	u32 csr;
	u32 status;

	csr = readl(ch->addr + APB_DMA_CHAN_CSR);
	csr &= ~CSR_IE_EOC;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	csr &= ~CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
}
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	while (!list_empty(&ch->list))
		list_del(ch->list.next);

	tegra_dma_stop(ch);

	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return 0;
}
/* should be called with the channel lock held */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req, unsigned int status)
{
	unsigned int to_transfer;
	unsigned int req_transfer_count;
	unsigned int bytes_transferred;

	to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
	req_transfer_count = ch->req_transfer_count;
	req_transfer_count += 1;
	to_transfer += 1;

	bytes_transferred = req_transfer_count;

	if (status & STA_BUSY)
		bytes_transferred -= to_transfer;
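
	/*
	 * Worked example: a 64-byte one-shot request programs
	 * req_transfer_count = 15, i.e. 16 words.  If STA_BUSY is set with
	 * 5 words still pending (after the +1 adjustments above), then
	 * 16 - 5 = 11 words, i.e. 44 bytes, have completed.
	 */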
	/* In continuous transfer mode, DMA only tracks the count of the
	 * half DMA buffer. So, if the DMA already finished half the DMA
	 * then add the half buffer to the completed count.
	 */
	if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS)
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
			bytes_transferred += req_transfer_count;

	if (status & STA_ISE_EOC)
		bytes_transferred += req_transfer_count;

	bytes_transferred *= 4;

	return bytes_transferred;
}
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned int status;
	struct tegra_dma_req *req = NULL;
	int found = 0;
	unsigned long irq_flags;
	int stop = 0;
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);

	spin_lock_irqsave(&ch->lock, irq_flags);

	if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
		stop = 1;

	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			list_del(&req->node);
			found = 1;
			break;
		}
	}
	if (!found) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return 0;
	}

	if (!stop)
		goto skip_stop_dma;

	/* STOP the DMA and get the transfer count.
	 * Getting the transfer count is tricky.
	 *  - Globally disable DMA on all channels
	 *  - Read the channel's status register to know the number of pending
	 *    bytes to be transferred.
	 *  - Stop the DMA channel
	 *  - Globally re-enable DMA to resume other transfers
	 */
	spin_lock(&enable_lock);
	writel(0, addr + APB_DMA_GEN);
	status = readl(ch->addr + APB_DMA_CHAN_STA);
	tegra_dma_stop(ch);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	spin_unlock(&enable_lock);

	req->bytes_transferred = dma_active_count(ch, req, status);

	if (!list_empty(&ch->list)) {
		/* if the list is not empty, queue the next request */
		struct tegra_dma_req *next_req;
		next_req = list_entry(ch->list.next,
			typeof(*next_req), node);
		tegra_dma_update_hw(ch, next_req);
	}

skip_stop_dma:
	req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	/* Callback should be called without any lock */
	req->complete(req);
	return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
	unsigned long irq_flags;
	bool is_empty;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list))
		is_empty = true;
	else
		is_empty = false;
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
	struct tegra_dma_req *_req)
{
	unsigned long irq_flags;
	struct tegra_dma_req *req;

	spin_lock_irqsave(&ch->lock, irq_flags);
	list_for_each_entry(req, &ch->list, node) {
		if (req == _req) {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
	return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
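
/*
 * Enqueueing only kicks the hardware when the queue was empty; while a
 * transfer is in flight, follow-up requests are started by the interrupt
 * handlers further below.
 */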
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	unsigned long irq_flags;
	int start_dma = 0;

	if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
		req->source_addr & 0x3 || req->dest_addr & 0x3) {
		pr_err("Invalid DMA request for channel %d\n", ch->id);
		return -EINVAL;
	}

	spin_lock_irqsave(&ch->lock, irq_flags);

	req->bytes_transferred = 0;
	req->status = 0;
	req->buffer_status = 0;
	if (list_empty(&ch->list))
		start_dma = 1;

	list_add_tail(&req->node, &ch->list);

	if (start_dma)
		tegra_dma_update_hw(ch, req);

	spin_unlock_irqrestore(&ch->lock, irq_flags);

	return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
	int channel;
	struct tegra_dma_channel *ch = NULL;

	mutex_lock(&tegra_dma_lock);

	/* first channel is the shared channel */
	if (mode & TEGRA_DMA_SHARED) {
		channel = TEGRA_SYSTEM_DMA_CH_MIN;
	} else {
		channel = find_first_zero_bit(channel_usage,
			ARRAY_SIZE(dma_channels));
		if (channel >= ARRAY_SIZE(dma_channels)) {
			pr_err("%s: failed to allocate a DMA channel",
				__func__);
			goto out;
		}
	}
	__set_bit(channel, channel_usage);
	ch = &dma_channels[channel];
	ch->mode = mode;

out:
	mutex_unlock(&tegra_dma_lock);
	return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
	if (ch->mode & TEGRA_DMA_SHARED)
		return;
	tegra_dma_cancel(ch);
	mutex_lock(&tegra_dma_lock);
	__clear_bit(ch->id, channel_usage);
	mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
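
/*
 * tegra_dma_update_hw_partial() below only rewrites the two pointer
 * registers while the channel keeps running; it is used to chain the next
 * request onto a continuous (double-buffered) transfer without stopping
 * the hardware.
 */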
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	u32 apb_ptr;
	u32 ahb_ptr;

	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;
	} else {
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;
	}
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
	struct tegra_dma_req *req)
{
	int ahb_addr_wrap;
	int apb_addr_wrap;
	int ahb_bus_width;
	int apb_bus_width;
	int index;

	u32 ahb_seq;
	u32 apb_seq;
	u32 ahb_ptr;
	u32 apb_ptr;
	u32 csr;

	csr = CSR_IE_EOC | CSR_FLOW;
	ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
	apb_seq = 0;

	csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

	/* One shot mode is always single buffered,
	 * continuous mode is always double buffered
	 */
	if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
		csr |= CSR_ONCE;
		ch->req_transfer_count = (req->size >> 2) - 1;
	} else {
		ahb_seq |= AHB_SEQ_DBL_BUF;

		/* In double buffered mode, we set the size to half the
		 * requested size and interrupt when half the buffer
		 * is full.
		 */
		ch->req_transfer_count = (req->size >> 3) - 1;
	}

	csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
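
	/*
	 * Example of the WCOUNT programming above: a 4096-byte one-shot
	 * request yields (4096 >> 2) - 1 = 1023 (words - 1); the same
	 * request in continuous/double-buffered mode yields
	 * (4096 >> 3) - 1 = 511, i.e. one half-buffer per interrupt.
	 */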
	if (req->to_memory) {
		apb_ptr = req->source_addr;
		ahb_ptr = req->dest_addr;

		apb_addr_wrap = req->source_wrap;
		ahb_addr_wrap = req->dest_wrap;
		apb_bus_width = req->source_bus_width;
		ahb_bus_width = req->dest_bus_width;
	} else {
		csr |= CSR_DIR;
		apb_ptr = req->dest_addr;
		ahb_ptr = req->source_addr;

		apb_addr_wrap = req->dest_wrap;
		ahb_addr_wrap = req->source_wrap;
		apb_bus_width = req->dest_bus_width;
		ahb_bus_width = req->source_bus_width;
	}

	apb_addr_wrap >>= 2;
	ahb_addr_wrap >>= 2;

	/* set address wrap for APB size */
	index = 0;
	do {
		if (apb_addr_wrap_table[index] == apb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(apb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
	apb_seq |= index << APB_SEQ_WRAP_SHIFT;

	/* set address wrap for AHB size */
	index = 0;
	do {
		if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
			break;
		index++;
	} while (index < ARRAY_SIZE(ahb_addr_wrap_table));
	BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
	ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == ahb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

	for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
		if (bus_width_table[index] == apb_bus_width)
			break;
	}
	BUG_ON(index == ARRAY_SIZE(bus_width_table));
	apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

	writel(csr, ch->addr + APB_DMA_CHAN_CSR);
	writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
	writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
	writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
	writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

	csr |= CSR_ENB;
	writel(csr, ch->addr + APB_DMA_CHAN_CSR);

	req->status = TEGRA_DMA_REQ_INFLIGHT;
}
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		int bytes_transferred;

		bytes_transferred = ch->req_transfer_count;
		bytes_transferred += 1;
		bytes_transferred <<= 2;

		list_del(&req->node);
		req->bytes_transferred = bytes_transferred;
		req->status = TEGRA_DMA_REQ_SUCCESS;

		spin_unlock_irqrestore(&ch->lock, irq_flags);
		/* Callback should be called without any lock */
		pr_debug("%s: transferred %d bytes\n", __func__,
			req->bytes_transferred);
		req->complete(req);
		spin_lock_irqsave(&ch->lock, irq_flags);
	}

	if (!list_empty(&ch->list)) {
		req = list_entry(ch->list.next, typeof(*req), node);
		/* the complete function we just called may have enqueued
		 * another req, in which case dma has already started
		 */
		if (req->status != TEGRA_DMA_REQ_INFLIGHT)
			tegra_dma_update_hw(ch, req);
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
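
/*
 * Continuous-mode ping-pong bookkeeping: each request generates two EOC
 * interrupts.  On the first, buffer_status goes EMPTY -> HALF_FULL and
 * the optional req->threshold() callback runs; on the second it goes
 * HALF_FULL -> FULL and req->complete() runs.  STA_PING_PONG indicates
 * which half the hardware is currently filling, letting the handler
 * detect when it has fallen out of sync.
 */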
static void handle_continuous_dma(struct tegra_dma_channel *ch)
{
	struct tegra_dma_req *req;
	struct tegra_dma_req *next_req;
	unsigned long irq_flags;

	spin_lock_irqsave(&ch->lock, irq_flags);
	if (list_empty(&ch->list)) {
		spin_unlock_irqrestore(&ch->lock, irq_flags);
		return;
	}

	req = list_entry(ch->list.next, typeof(*req), node);
	if (req) {
		if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
			bool is_dma_ping_complete;
			is_dma_ping_complete =
				!!(readl(ch->addr + APB_DMA_CHAN_STA) &
					STA_PING_PONG);
			if (req->to_memory)
				is_dma_ping_complete = !is_dma_ping_complete;
			/* Out of sync - Release current buffer */
			if (!is_dma_ping_complete) {
				int bytes_transferred;

				bytes_transferred = ch->req_transfer_count;
				bytes_transferred += 1;
				bytes_transferred <<= 3;
				req->buffer_status =
					TEGRA_DMA_REQ_BUF_STATUS_FULL;
				req->bytes_transferred = bytes_transferred;
				req->status = TEGRA_DMA_REQ_SUCCESS;
				tegra_dma_stop(ch);

				if (!list_is_last(&req->node, &ch->list)) {
					next_req = list_entry(req->node.next,
						typeof(*next_req), node);
					tegra_dma_update_hw(ch, next_req);
				}

				list_del(&req->node);

				/* DMA lock is NOT held when callback is
				 * called.
				 */
				spin_unlock_irqrestore(&ch->lock, irq_flags);
				req->complete(req);
				return;
			}

			/* Load the next request into the hardware, if
			 * available.
			 */
			if (!list_is_last(&req->node, &ch->list)) {
				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				tegra_dma_update_hw_partial(ch, next_req);
			}
			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			if (likely(req->threshold))
				req->threshold(req);
			return;

		} else if (req->buffer_status ==
			TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
			/* Callback when the buffer is completely full (i.e.
			 * on the second interrupt)
			 */
			int bytes_transferred;

			bytes_transferred = ch->req_transfer_count;
			bytes_transferred += 1;
			bytes_transferred <<= 3;

			req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
			req->bytes_transferred = bytes_transferred;
			req->status = TEGRA_DMA_REQ_SUCCESS;
			if (list_is_last(&req->node, &ch->list)) {
				tegra_dma_stop(ch);
			} else {
				/* It may be possible that the req came in
				 * after the half-buffer interrupt, in which
				 * case the DMA needs to be started again.
				 */
				next_req = list_entry(req->node.next,
					typeof(*next_req), node);
				if (next_req->status !=
					TEGRA_DMA_REQ_INFLIGHT) {
					tegra_dma_stop(ch);
					tegra_dma_update_hw(ch, next_req);
				}
			}

			list_del(&req->node);

			/* DMA lock is NOT held when callback is called */
			spin_unlock_irqrestore(&ch->lock, irq_flags);
			req->complete(req);
			return;
		} else {
			BUG();
		}
	} else {
		/* DMA should have been stopped much earlier */
		BUG();
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);
}
static irqreturn_t dma_isr(int irq, void *data)
{
	struct tegra_dma_channel *ch = data;
	unsigned long status;

	status = readl(ch->addr + APB_DMA_CHAN_STA);
	if (status & STA_ISE_EOC)
		writel(status, ch->addr + APB_DMA_CHAN_STA);
	else {
		pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
		return IRQ_HANDLED;
	}

	if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
		handle_oneshot_dma(ch);
	else
		handle_continuous_dma(ch);

	return IRQ_HANDLED;
}
int __init tegra_dma_init(void)
{
	int ret = 0;
	int i;
	unsigned int irq;
	void __iomem *addr;

	addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	writel(GEN_ENABLE, addr + APB_DMA_GEN);
	writel(0, addr + APB_DMA_CNTRL);
	writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
		addr + APB_DMA_IRQ_MASK_SET);
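
	/*
	 * The shift above unmasks IRQs for channels
	 * 0..TEGRA_SYSTEM_DMA_CH_MAX only: with CH_MAX = 11,
	 * 0xFFFFFFFF >> (31 - 11) = 0xFFF.
	 */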
	memset(channel_usage, 0, sizeof(channel_usage));
	memset(dma_channels, 0, sizeof(dma_channels));

	/* Reserve all the channels we are not supposed to touch */
	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
		__set_bit(i, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];

		__clear_bit(i, channel_usage);

		ch->id = i;
		snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

		ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->list);

		irq = INT_APB_DMA_CH0 + i;
		ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
		if (ret) {
			pr_err("Failed to register IRQ %d for DMA %d\n",
				irq, i);
			goto fail;
		}
		ch->irq = irq;
	}
	/* mark the shared channel allocated */
	__set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

	for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
		__set_bit(i, channel_usage);

	return ret;
fail:
	writel(0, addr + APB_DMA_GEN);
	for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
		struct tegra_dma_channel *ch = &dma_channels[i];
		if (ch->irq)
			free_irq(ch->irq, ch);
	}
	return ret;
}
#ifdef CONFIG_PM
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
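
/*
 * Context layout: three global registers (GEN, CNTRL, IRQ_MASK) followed
 * by five registers per channel, hence the 5*TEGRA_SYSTEM_DMA_CH_NR + 3
 * slots above.
 */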
void tegra_dma_suspend(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	*ctx++ = readl(addr + APB_DMA_GEN);
	*ctx++ = readl(addr + APB_DMA_CNTRL);
	*ctx++ = readl(addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		*ctx++ = readl(addr + APB_DMA_CHAN_CSR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
		*ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
	}
}
void tegra_dma_resume(void)
{
	void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
	u32 *ctx = apb_dma;
	int i;

	writel(*ctx++, addr + APB_DMA_GEN);
	writel(*ctx++, addr + APB_DMA_CNTRL);
	writel(*ctx++, addr + APB_DMA_IRQ_MASK);

	for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
		addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
			TEGRA_APB_DMA_CH0_SIZE * i);

		writel(*ctx++, addr + APB_DMA_CHAN_CSR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
		writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
	}
}
#endif