/*
 * arch/arm/mach-tegra/dma.c
 *
 * System DMA driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <mach/dma.h>
#include <mach/irqs.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
#define APB_DMA_GEN 0x000
#define GEN_ENABLE (1<<31)

#define APB_DMA_CNTRL 0x010

#define APB_DMA_IRQ_MASK 0x01c

#define APB_DMA_IRQ_MASK_SET 0x020

#define APB_DMA_CHAN_CSR 0x000
#define CSR_ENB (1<<31)
#define CSR_IE_EOC (1<<30)
#define CSR_HOLD (1<<29)
#define CSR_DIR (1<<28)
#define CSR_ONCE (1<<27)
#define CSR_FLOW (1<<21)
#define CSR_REQ_SEL_SHIFT 16
#define CSR_REQ_SEL_MASK (0x1F<<CSR_REQ_SEL_SHIFT)
#define CSR_WCOUNT_SHIFT 2
#define CSR_WCOUNT_MASK 0xFFFC
#define APB_DMA_CHAN_STA 0x004
#define STA_BUSY (1<<31)
#define STA_ISE_EOC (1<<30)
#define STA_HALT (1<<29)
#define STA_PING_PONG (1<<28)
#define STA_COUNT_SHIFT 2
#define STA_COUNT_MASK 0xFFFC
#define APB_DMA_CHAN_AHB_PTR 0x010

#define APB_DMA_CHAN_AHB_SEQ 0x014
#define AHB_SEQ_INTR_ENB (1<<31)
#define AHB_SEQ_BUS_WIDTH_SHIFT 28
#define AHB_SEQ_BUS_WIDTH_MASK (0x7<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_8 (0<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_16 (1<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_32 (2<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_64 (3<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_BUS_WIDTH_128 (4<<AHB_SEQ_BUS_WIDTH_SHIFT)
#define AHB_SEQ_DATA_SWAP (1<<27)
#define AHB_SEQ_BURST_MASK (0x7<<24)
#define AHB_SEQ_BURST_1 (4<<24)
#define AHB_SEQ_BURST_4 (5<<24)
#define AHB_SEQ_BURST_8 (6<<24)
#define AHB_SEQ_DBL_BUF (1<<19)
#define AHB_SEQ_WRAP_SHIFT 16
#define AHB_SEQ_WRAP_MASK (0x7<<AHB_SEQ_WRAP_SHIFT)
#define APB_DMA_CHAN_APB_PTR 0x018

#define APB_DMA_CHAN_APB_SEQ 0x01c
#define APB_SEQ_BUS_WIDTH_SHIFT 28
#define APB_SEQ_BUS_WIDTH_MASK (0x7<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_8 (0<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_16 (1<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_32 (2<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_64 (3<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_BUS_WIDTH_128 (4<<APB_SEQ_BUS_WIDTH_SHIFT)
#define APB_SEQ_DATA_SWAP (1<<27)
#define APB_SEQ_WRAP_SHIFT 16
#define APB_SEQ_WRAP_MASK (0x7<<APB_SEQ_WRAP_SHIFT)
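
/*
 * Channel partitioning: of the TEGRA_SYSTEM_DMA_CH_NR hardware channels,
 * the last TEGRA_SYSTEM_DMA_AVP_CH_NUM are left to the AVP coprocessor.
 * The kernel only hands out channels TEGRA_SYSTEM_DMA_CH_MIN through
 * TEGRA_SYSTEM_DMA_CH_MAX; tegra_dma_init() marks everything else as
 * permanently in use.
 */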
#define TEGRA_SYSTEM_DMA_CH_NR 16
#define TEGRA_SYSTEM_DMA_AVP_CH_NUM 4
#define TEGRA_SYSTEM_DMA_CH_MIN 0
#define TEGRA_SYSTEM_DMA_CH_MAX \
        (TEGRA_SYSTEM_DMA_CH_NR - TEGRA_SYSTEM_DMA_AVP_CH_NUM - 1)
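
/*
 * Lookup tables used by tegra_dma_update_hw(): the index at which a
 * request's address-wrap or bus-width value is found is the value that
 * gets programmed into the corresponding APB/AHB SEQ register field.
 */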
const unsigned int ahb_addr_wrap_table[8] = {
        0, 32, 64, 128, 256, 512, 1024, 2048
};

const unsigned int apb_addr_wrap_table[8] = {0, 1, 2, 4, 8, 16, 32, 64};

const unsigned int bus_width_table[5] = {8, 16, 32, 64, 128};
#define TEGRA_DMA_NAME_SIZE 16
struct tegra_dma_channel {
        struct list_head list;
        int id;
        spinlock_t lock;
        char name[TEGRA_DMA_NAME_SIZE];
        void __iomem *addr;
        int mode;
        int irq;
        int req_transfer_count;
};
#define NV_DMA_MAX_CHANNELS 32

static DEFINE_MUTEX(tegra_dma_lock);
static DEFINE_SPINLOCK(enable_lock);

static DECLARE_BITMAP(channel_usage, NV_DMA_MAX_CHANNELS);
static struct tegra_dma_channel dma_channels[NV_DMA_MAX_CHANNELS];
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req);
static void tegra_dma_stop(struct tegra_dma_channel *ch);
void tegra_dma_flush(struct tegra_dma_channel *ch)
{
}
EXPORT_SYMBOL(tegra_dma_flush);
void tegra_dma_dequeue(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;

        if (tegra_dma_is_empty(ch))
                return;

        req = list_entry(ch->list.next, typeof(*req), node);

        tegra_dma_dequeue_req(ch, req);
}
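
/*
 * Stop a channel: drop the completion-interrupt enable (CSR_IE_EOC) and the
 * channel enable (CSR_ENB) bits, then clear any already-latched EOC status
 * so a stale interrupt is not left pending.
 */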
static void tegra_dma_stop(struct tegra_dma_channel *ch)
{
        unsigned int csr;
        unsigned int status;

        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_IE_EOC;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        csr &= ~CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
}
bool tegra_dma_is_stopped(struct tegra_dma_channel *ch)
{
        return !!(readl(ch->addr + APB_DMA_CHAN_STA) & CSR_ENB);
}
int tegra_dma_cancel(struct tegra_dma_channel *ch)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        while (!list_empty(&ch->list))
                list_del(ch->list.next);

        tegra_dma_stop(ch);

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return 0;
}
/* should be called with the channel lock held */
static unsigned int dma_active_count(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req, unsigned int status)
{
        unsigned int to_transfer;
        unsigned int req_transfer_count;
        unsigned int bytes_transferred;

        to_transfer = (status & STA_COUNT_MASK) >> STA_COUNT_SHIFT;
        req_transfer_count = ch->req_transfer_count;
        req_transfer_count += 1;
        to_transfer += 1;

        bytes_transferred = req_transfer_count;

        if (status & STA_BUSY)
                bytes_transferred -= to_transfer;

        /* In continuous transfer mode, DMA only tracks the count of the
         * half DMA buffer. So, if the DMA already finished half the DMA
         * then add the half buffer to the completed count.
         */
        if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
                if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL)
                        bytes_transferred += req_transfer_count;

        if (status & STA_ISE_EOC)
                bytes_transferred += req_transfer_count;

        bytes_transferred *= 4;

        return bytes_transferred;
}
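
/*
 * Removing the in-flight request requires a stable word count: the APB DMA
 * block is disabled globally (under enable_lock) while the channel status
 * is sampled and the channel stopped, then re-enabled so that transfers on
 * the other channels resume.
 */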
int tegra_dma_dequeue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        struct tegra_dma_req *req = NULL;
        int found = 0;
        int stop = 0;
        unsigned int status;
        unsigned long irq_flags;
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);

        spin_lock_irqsave(&ch->lock, irq_flags);

        if (list_entry(ch->list.next, struct tegra_dma_req, node) == _req)
                stop = 1;

        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        list_del(&req->node);
                        found = 1;
                        break;
                }
        }
        if (!found) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return 0;
        }

        if (!stop)
                goto skip_stop_dma;

        /* STOP the DMA and get the transfer count.
         * Getting the transfer count is tricky.
         *  - Globally disable DMA on all channels
         *  - Read the channel's status register to know the number of pending
         *    bytes to be transferred.
         *  - Stop the dma channel
         *  - Globally re-enable DMA to resume other transfers
         */
        spin_lock(&enable_lock);
        writel(0, addr + APB_DMA_GEN);
        status = readl(ch->addr + APB_DMA_CHAN_STA);
        tegra_dma_stop(ch);
        writel(GEN_ENABLE, addr + APB_DMA_GEN);
        spin_unlock(&enable_lock);

        req->bytes_transferred = dma_active_count(ch, req, status);

        if (!list_empty(&ch->list)) {
                /* if the list is not empty, queue the next request */
                struct tegra_dma_req *next_req;
                next_req = list_entry(ch->list.next,
                        typeof(*next_req), node);
                tegra_dma_update_hw(ch, next_req);
        }

skip_stop_dma:
        req->status = -TEGRA_DMA_REQ_ERROR_ABORTED;

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        /* Callback should be called without any lock */
        req->complete(req);
        return 0;
}
EXPORT_SYMBOL(tegra_dma_dequeue_req);
bool tegra_dma_is_empty(struct tegra_dma_channel *ch)
{
        unsigned long irq_flags;
        bool is_empty;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list))
                is_empty = true;
        else
                is_empty = false;
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return is_empty;
}
EXPORT_SYMBOL(tegra_dma_is_empty);
bool tegra_dma_is_req_inflight(struct tegra_dma_channel *ch,
        struct tegra_dma_req *_req)
{
        unsigned long irq_flags;
        struct tegra_dma_req *req;

        spin_lock_irqsave(&ch->lock, irq_flags);
        list_for_each_entry(req, &ch->list, node) {
                if (req == _req) {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        return true;
                }
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        return false;
}
EXPORT_SYMBOL(tegra_dma_is_req_inflight);
int tegra_dma_enqueue_req(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        unsigned long irq_flags;
        int start_dma = 0;

        if (req->size > TEGRA_DMA_MAX_TRANSFER_SIZE ||
                req->source_addr & 0x3 || req->dest_addr & 0x3) {
                pr_err("Invalid DMA request for channel %d\n", ch->id);
                return -EINVAL;
        }

        spin_lock_irqsave(&ch->lock, irq_flags);

        req->bytes_transferred = 0;
        req->status = 0;
        /* STATUS_EMPTY just means the DMA hasn't processed the buf yet. */
        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_EMPTY;
        if (list_empty(&ch->list))
                start_dma = 1;

        list_add_tail(&req->node, &ch->list);

        if (start_dma)
                tegra_dma_update_hw(ch, req);
        /* Check to see if this request needs to be pushed immediately.
         * For continuous single-buffer DMA:
         * The first buffer is always in-flight. The 2nd buffer should
         * also be in-flight. The 3rd buffer becomes in-flight when the
         * first is completed in the interrupt.
         */
        else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE) {
                struct tegra_dma_req *first_req, *second_req;
                first_req = list_entry(ch->list.next,
                        typeof(*first_req), node);
                second_req = list_entry(first_req->node.next,
                        typeof(*second_req), node);
                if (second_req == req) {
                        unsigned long status =
                                readl(ch->addr + APB_DMA_CHAN_STA);
                        if (!(status & STA_ISE_EOC))
                                tegra_dma_update_hw_partial(ch, req);
                        /* Handle the case where the IRQ fired while we're
                         * writing the interrupts.
                         */
                        if (status & STA_ISE_EOC) {
                                /* Interrupt fired, let the IRQ stop/restart
                                 * the DMA with this buffer in a clean way.
                                 */
                                req->status = TEGRA_DMA_REQ_SUCCESS;
                        }
                }
        }

        spin_unlock_irqrestore(&ch->lock, irq_flags);

        return 0;
}
EXPORT_SYMBOL(tegra_dma_enqueue_req);
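
/*
 * Illustrative client-side sketch (not part of this driver).  The fields
 * shown are the ones this file reads from struct tegra_dma_req; the full
 * definition, including the completion callback, lives in <mach/dma.h>:
 *
 *	ch = tegra_dma_allocate_channel(TEGRA_DMA_MODE_ONESHOT);
 *	req.source_addr = src_phys;	word-aligned bus address
 *	req.dest_addr = dst_phys;	word-aligned bus address
 *	req.size = len;			in bytes (the hardware counts words)
 *	req.to_memory = 1;		device-to-memory transfer
 *	req.req_sel = ...;		peripheral request select
 *	tegra_dma_enqueue_req(ch, &req);
 *	...wait for the request's completion callback from dma_isr()...
 *	tegra_dma_free_channel(ch);
 */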
struct tegra_dma_channel *tegra_dma_allocate_channel(int mode)
{
        int channel;
        struct tegra_dma_channel *ch = NULL;

        mutex_lock(&tegra_dma_lock);

        /* first channel is the shared channel */
        if (mode & TEGRA_DMA_SHARED) {
                channel = TEGRA_SYSTEM_DMA_CH_MIN;
        } else {
                channel = find_first_zero_bit(channel_usage,
                        ARRAY_SIZE(dma_channels));
                if (channel >= ARRAY_SIZE(dma_channels)) {
                        pr_err("%s: failed to allocate a DMA channel",
                                __func__);
                        goto out;
                }
        }
        __set_bit(channel, channel_usage);
        ch = &dma_channels[channel];
        ch->mode = mode;

out:
        mutex_unlock(&tegra_dma_lock);
        return ch;
}
EXPORT_SYMBOL(tegra_dma_allocate_channel);
void tegra_dma_free_channel(struct tegra_dma_channel *ch)
{
        if (ch->mode & TEGRA_DMA_SHARED)
                return;
        tegra_dma_cancel(ch);
        mutex_lock(&tegra_dma_lock);
        __clear_bit(ch->id, channel_usage);
        mutex_unlock(&tegra_dma_lock);
}
EXPORT_SYMBOL(tegra_dma_free_channel);
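
/*
 * Reprogram only the buffer pointers and the word count of an already
 * configured channel.  Used by the continuous modes (and by enqueue for
 * continuous-single) to hand the hardware the next buffer without a full
 * stop/reconfigure cycle.
 */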
static void tegra_dma_update_hw_partial(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        u32 apb_ptr;
        u32 ahb_ptr;
        u32 csr;

        if (req->to_memory) {
                apb_ptr = req->source_addr;
                ahb_ptr = req->dest_addr;
        } else {
                apb_ptr = req->dest_addr;
                ahb_ptr = req->source_addr;
        }
        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
                ch->req_transfer_count = (req->size >> 3) - 1;
        else
                ch->req_transfer_count = (req->size >> 2) - 1;
        csr = readl(ch->addr + APB_DMA_CHAN_CSR);
        csr &= ~CSR_WCOUNT_MASK;
        csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
}
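
/*
 * Fully program a channel for a request: flow control and request select,
 * transfer direction, word count, address wrap and bus width on both the
 * APB and AHB sides, and finally the channel enable bit.
 */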
static void tegra_dma_update_hw(struct tegra_dma_channel *ch,
        struct tegra_dma_req *req)
{
        int ahb_addr_wrap;
        int apb_addr_wrap;
        int ahb_bus_width;
        int apb_bus_width;
        int index;

        u32 ahb_seq;
        u32 apb_seq;
        u32 ahb_ptr;
        u32 apb_ptr;
        u32 csr;

        csr = CSR_IE_EOC | CSR_FLOW;
        ahb_seq = AHB_SEQ_INTR_ENB | AHB_SEQ_BURST_1;
        apb_seq = 0;

        csr |= req->req_sel << CSR_REQ_SEL_SHIFT;

        ch->req_transfer_count = (req->size >> 2) - 1;

        /* One shot mode is always single buffered. Continuous mode could
         * support either single or double buffering.
         */
        if (ch->mode & TEGRA_DMA_MODE_ONESHOT) {
                csr |= CSR_ONCE;
        } else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE) {
                ahb_seq |= AHB_SEQ_DBL_BUF;
                /* We want an interrupt halfway through, then on the
                 * completion. The double buffer means 2 interrupts
                 * pass before the DMA HW latches a new AHB_PTR etc.
                 */
                ch->req_transfer_count = (req->size >> 3) - 1;
        }
        csr |= ch->req_transfer_count << CSR_WCOUNT_SHIFT;

        if (req->to_memory) {
                apb_ptr = req->source_addr;
                ahb_ptr = req->dest_addr;

                apb_addr_wrap = req->source_wrap;
                ahb_addr_wrap = req->dest_wrap;
                apb_bus_width = req->source_bus_width;
                ahb_bus_width = req->dest_bus_width;
        } else {
                csr |= CSR_DIR;
                apb_ptr = req->dest_addr;
                ahb_ptr = req->source_addr;

                apb_addr_wrap = req->dest_wrap;
                ahb_addr_wrap = req->source_wrap;
                apb_bus_width = req->dest_bus_width;
                ahb_bus_width = req->source_bus_width;
        }

        apb_addr_wrap >>= 2;
        ahb_addr_wrap >>= 2;

        /* set address wrap for APB size */
        index = 0;
        do {
                if (apb_addr_wrap_table[index] == apb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(apb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(apb_addr_wrap_table));
        apb_seq |= index << APB_SEQ_WRAP_SHIFT;

        /* set address wrap for AHB size */
        index = 0;
        do {
                if (ahb_addr_wrap_table[index] == ahb_addr_wrap)
                        break;
                index++;
        } while (index < ARRAY_SIZE(ahb_addr_wrap_table));
        BUG_ON(index == ARRAY_SIZE(ahb_addr_wrap_table));
        ahb_seq |= index << AHB_SEQ_WRAP_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == ahb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        ahb_seq |= index << AHB_SEQ_BUS_WIDTH_SHIFT;

        for (index = 0; index < ARRAY_SIZE(bus_width_table); index++) {
                if (bus_width_table[index] == apb_bus_width)
                        break;
        }
        BUG_ON(index == ARRAY_SIZE(bus_width_table));
        apb_seq |= index << APB_SEQ_BUS_WIDTH_SHIFT;

        writel(csr, ch->addr + APB_DMA_CHAN_CSR);
        writel(apb_seq, ch->addr + APB_DMA_CHAN_APB_SEQ);
        writel(apb_ptr, ch->addr + APB_DMA_CHAN_APB_PTR);
        writel(ahb_seq, ch->addr + APB_DMA_CHAN_AHB_SEQ);
        writel(ahb_ptr, ch->addr + APB_DMA_CHAN_AHB_PTR);

        csr |= CSR_ENB;
        writel(csr, ch->addr + APB_DMA_CHAN_CSR);

        req->status = TEGRA_DMA_REQ_INFLIGHT;
}
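
/*
 * Per-mode interrupt handlers.  Each is invoked from dma_isr(), takes the
 * channel lock itself, and completes the request at the head of the
 * channel's queue, restarting the hardware for the next request if one is
 * pending.
 */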
static void handle_oneshot_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);

        list_del(&req->node);
        req->bytes_transferred = req->size;
        req->status = TEGRA_DMA_REQ_SUCCESS;

        spin_unlock_irqrestore(&ch->lock, irq_flags);
        /* Callback should be called without any lock */
        pr_debug("%s: transferred %d bytes\n", __func__,
                req->bytes_transferred);
        req->complete(req);
        spin_lock_irqsave(&ch->lock, irq_flags);

        if (!list_empty(&ch->list)) {
                req = list_entry(ch->list.next, typeof(*req), node);
                /* the complete function we just called may have enqueued
                 * another req, in which case dma has already started */
                if (req->status != TEGRA_DMA_REQ_INFLIGHT)
                        tegra_dma_update_hw(ch, req);
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);
}
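
/*
 * Double-buffered continuous mode: the hardware interrupts once per half
 * (ping/pong) buffer.  The first interrupt marks the request half-full and
 * fires its threshold callback; the second marks it full, restarts or
 * stops the channel as appropriate, and fires the completion callback.
 */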
static void handle_continuous_dbl_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        struct tegra_dma_req *next_req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                return;
        }

        req = list_entry(ch->list.next, typeof(*req), node);

        if (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_EMPTY) {
                bool is_dma_ping_complete;
                is_dma_ping_complete =
                        !!(readl(ch->addr + APB_DMA_CHAN_STA) &
                                STA_PING_PONG);
                if (req->to_memory)
                        is_dma_ping_complete = !is_dma_ping_complete;
                /* Out of sync - Release current buffer */
                if (!is_dma_ping_complete) {
                        req->buffer_status =
                                TEGRA_DMA_REQ_BUF_STATUS_FULL;
                        req->bytes_transferred = req->size;
                        req->status = TEGRA_DMA_REQ_SUCCESS;
                        tegra_dma_stop(ch);

                        if (!list_is_last(&req->node, &ch->list)) {
                                next_req = list_entry(req->node.next,
                                        typeof(*next_req), node);
                                tegra_dma_update_hw(ch, next_req);
                        }

                        list_del(&req->node);

                        /* DMA lock is NOT held when callback is
                         * called. */
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                        req->complete(req);
                        return;
                }

                /* Load the next request into the hardware, if
                 * one is available. */
                if (!list_is_last(&req->node, &ch->list)) {
                        next_req = list_entry(req->node.next,
                                typeof(*next_req), node);
                        tegra_dma_update_hw_partial(ch, next_req);
                }
                req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL;
                req->status = TEGRA_DMA_REQ_SUCCESS;
                /* DMA lock is NOT held when callback is called */
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                if (likely(req->threshold))
                        req->threshold(req);
                return;

        } else if (req->buffer_status ==
                TEGRA_DMA_REQ_BUF_STATUS_HALF_FULL) {
                /* Callback when the buffer is completely full (i.e. on
                 * the second interrupt) */
                req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
                req->bytes_transferred = req->size;
                req->status = TEGRA_DMA_REQ_SUCCESS;
                if (list_is_last(&req->node, &ch->list))
                        tegra_dma_stop(ch);
                else {
                        /* The request may have arrived after the half-buffer
                         * completion, so the DMA needs to be restarted on the
                         * next buffer here. */
                        next_req = list_entry(req->node.next,
                                typeof(*next_req), node);
                        if (next_req->status !=
                                TEGRA_DMA_REQ_INFLIGHT) {
                                tegra_dma_stop(ch);
                                tegra_dma_update_hw(ch, next_req);
                        }
                }

                list_del(&req->node);

                /* DMA lock is NOT held when callback is called */
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                req->complete(req);
                return;

        } else {
                tegra_dma_stop(ch);
                /* DMA should have been stopped much earlier */
                BUG();
        }

        spin_unlock_irqrestore(&ch->lock, irq_flags);
}
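
/*
 * Single-buffered continuous mode: each interrupt completes the request at
 * the head of the list.  The following request is expected to already be
 * in flight, so only the one after that needs to be handed to the hardware
 * here.
 */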
static void handle_continuous_sngl_dma(struct tegra_dma_channel *ch)
{
        struct tegra_dma_req *req;
        struct tegra_dma_req *next_req;
        struct tegra_dma_req *next_next_req;
        unsigned long irq_flags;

        spin_lock_irqsave(&ch->lock, irq_flags);
        if (list_empty(&ch->list)) {
                tegra_dma_stop(ch);
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                pr_err("%s: No requests in the list.\n", __func__);
                return;
        }
        req = list_entry(ch->list.next, typeof(*req), node);
        if (!req || (req->buffer_status == TEGRA_DMA_REQ_BUF_STATUS_FULL)) {
                tegra_dma_stop(ch);
                spin_unlock_irqrestore(&ch->lock, irq_flags);
                pr_err("%s: DMA complete irq without corresponding req\n",
                        __func__);
                return;
        }

        /* Handle the case when buffer is completely full */
        req->bytes_transferred = req->size;
        req->buffer_status = TEGRA_DMA_REQ_BUF_STATUS_FULL;
        req->status = TEGRA_DMA_REQ_SUCCESS;
        if (list_is_last(&req->node, &ch->list)) {
                pr_debug("%s: stop\n", __func__);
                tegra_dma_stop(ch);
        } else {
                /* The next entry should have already been queued and is now
                 * in the middle of xfer. We can then write the next->next one
                 * if it exists.
                 */
                next_req = list_entry(req->node.next, typeof(*next_req), node);
                if (next_req->status != TEGRA_DMA_REQ_INFLIGHT) {
                        pr_warning("%s: interrupt during enqueue\n", __func__);
                        tegra_dma_stop(ch);
                        tegra_dma_update_hw(ch, next_req);
                } else if (!list_is_last(&next_req->node, &ch->list)) {
                        next_next_req = list_entry(next_req->node.next,
                                typeof(*next_next_req), node);
                        tegra_dma_update_hw_partial(ch, next_next_req);
                }
        }
        list_del(&req->node);
        spin_unlock_irqrestore(&ch->lock, irq_flags);
        req->complete(req);
}
static irqreturn_t dma_isr(int irq, void *data)
{
        struct tegra_dma_channel *ch = data;
        unsigned long status;

        status = readl(ch->addr + APB_DMA_CHAN_STA);
        if (status & STA_ISE_EOC)
                writel(status, ch->addr + APB_DMA_CHAN_STA);
        else {
                pr_warning("Got a spurious ISR for DMA channel %d\n", ch->id);
                return IRQ_HANDLED;
        }

        if (ch->mode & TEGRA_DMA_MODE_ONESHOT)
                handle_oneshot_dma(ch);
        else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_DOUBLE)
                handle_continuous_dbl_dma(ch);
        else if (ch->mode & TEGRA_DMA_MODE_CONTINUOUS_SINGLE)
                handle_continuous_sngl_dma(ch);
        else
                pr_err("Bad channel mode for DMA ISR to handle\n");

        return IRQ_HANDLED;
}
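
/*
 * Boot-time setup: enable the APB DMA block, unmask the interrupts for the
 * channels the CPU owns, reset the bookkeeping and register an interrupt
 * handler for every usable channel.  Channels outside CH_MIN..CH_MAX are
 * marked busy so they are never handed out.
 */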
int __init tegra_dma_init(void)
{
        int ret = 0;
        int i;
        unsigned int irq;
        void __iomem *addr;

        addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        writel(GEN_ENABLE, addr + APB_DMA_GEN);
        writel(0, addr + APB_DMA_CNTRL);
        writel(0xFFFFFFFFul >> (31 - TEGRA_SYSTEM_DMA_CH_MAX),
                addr + APB_DMA_IRQ_MASK_SET);

        memset(channel_usage, 0, sizeof(channel_usage));
        memset(dma_channels, 0, sizeof(dma_channels));

        /* Reserve all the channels we are not supposed to touch */
        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_MIN; i++)
                __set_bit(i, channel_usage);

        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];

                __clear_bit(i, channel_usage);

                ch->id = i;
                snprintf(ch->name, TEGRA_DMA_NAME_SIZE, "dma_channel_%d", i);

                ch->addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                spin_lock_init(&ch->lock);
                INIT_LIST_HEAD(&ch->list);

                irq = INT_APB_DMA_CH0 + i;
                ret = request_irq(irq, dma_isr, 0, dma_channels[i].name, ch);
                if (ret) {
                        pr_err("Failed to register IRQ %d for DMA %d\n",
                                irq, i);
                        goto fail;
                }
                ch->irq = irq;
        }
        /* mark the shared channel allocated */
        __set_bit(TEGRA_SYSTEM_DMA_CH_MIN, channel_usage);

        for (i = TEGRA_SYSTEM_DMA_CH_MAX+1; i < NV_DMA_MAX_CHANNELS; i++)
                __set_bit(i, channel_usage);

        return ret;
fail:
        writel(0, addr + APB_DMA_GEN);
        for (i = TEGRA_SYSTEM_DMA_CH_MIN; i <= TEGRA_SYSTEM_DMA_CH_MAX; i++) {
                struct tegra_dma_channel *ch = &dma_channels[i];
                if (ch->irq)
                        free_irq(ch->irq, ch);
        }
        return ret;
}
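
/* Suspend context: three global registers followed by five per channel. */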
static u32 apb_dma[5*TEGRA_SYSTEM_DMA_CH_NR + 3];
void tegra_dma_suspend(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        *ctx++ = readl(addr + APB_DMA_GEN);
        *ctx++ = readl(addr + APB_DMA_CNTRL);
        *ctx++ = readl(addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                *ctx++ = readl(addr + APB_DMA_CHAN_CSR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_AHB_SEQ);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_PTR);
                *ctx++ = readl(addr + APB_DMA_CHAN_APB_SEQ);
        }
}
void tegra_dma_resume(void)
{
        void __iomem *addr = IO_ADDRESS(TEGRA_APB_DMA_BASE);
        u32 *ctx = apb_dma;
        int i;

        writel(*ctx++, addr + APB_DMA_GEN);
        writel(*ctx++, addr + APB_DMA_CNTRL);
        writel(*ctx++, addr + APB_DMA_IRQ_MASK);

        for (i = 0; i < TEGRA_SYSTEM_DMA_CH_NR; i++) {
                addr = IO_ADDRESS(TEGRA_APB_DMA_CH0_BASE +
                        TEGRA_APB_DMA_CH0_SIZE * i);

                writel(*ctx++, addr + APB_DMA_CHAN_CSR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_AHB_SEQ);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_PTR);
                writel(*ctx++, addr + APB_DMA_CHAN_APB_SEQ);
        }
}