/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
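/*
 * Usage sketch (an assumption about typical deployment, not driver code):
 * when built as a module, the descriptor pool size can be tuned at load
 * time, and when built in, via the kernel command line:
 *
 *	modprobe at_hdmac init_nr_desc_per_channel=128
 *	at_hdmac.init_nr_desc_per_channel=128
 */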
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 *       to make initial allocation more convenient. This bit will be cleared
 *       and control will be given to client at usage time (during
 *       preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptors available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node, &(*first)->tx_list);
	}
	*prev = desc;
}
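/*
 * Illustrative sketch (not compiled; a simplified assumption, not extra
 * driver code): this is roughly how the prep_* routines below drive
 * atc_desc_chain() to build a hardware linked list of LLIs.
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	for (each chunk of the transfer) {
 *		desc = atc_desc_get(atchan);		// may return NULL
 *		// ... fill desc->lli.saddr/daddr/ctrla/ctrlb ...
 *		atc_desc_chain(&first, &prev, desc);	// links prev -> desc
 *	}
 *	// first->txd is what tx_submit() eventually hands to atc_dostart()
 */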
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take advantage to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}
	} while (pending);

	return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation physical destination address
 * @src: operation physical source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
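/*
 * Illustrative client-side sketch (not compiled; the bus addresses are
 * assumptions for the example, not part of this driver): how a kernel user
 * might drive the memcpy capability through the generic dmaengine API.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		tx = chan->device->device_prep_dma_memcpy(chan, dst_phys,
 *							  src_phys, len, 0);
 *		cookie = dmaengine_submit(tx);	// ends up in atc_tx_submit()
 *		dma_async_issue_pending(chan);
 *	}
 */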
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}
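/*
 * Illustrative peripheral-driver sketch (not compiled; buffer names are
 * assumptions): a typical DMA_MEM_TO_DEV request against this prep routine,
 * once the channel has been configured via DMA_SLAVE_CONFIG (see
 * set_runtime_config() below).
 *
 *	struct scatterlist sg;
 *	struct dma_async_tx_descriptor *txd;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE)) {
 *		txd = chan->device->device_prep_slave_sg(chan, &sg, 1,
 *				DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT, NULL);
 *		// then dmaengine_submit(txd) + dma_async_issue_pending(chan)
 *	}
 */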
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		return -EINVAL;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		return -EINVAL;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		return -EINVAL;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		return -EINVAL;

	return 0;
}
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	u32			ctrla;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @context: transfer context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct dma_slave_config	*sconfig = &atchan->dma_sconfig;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		reg_width;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* lets make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
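/*
 * Illustrative sketch (not compiled; buffer and callback names are
 * assumptions): a cyclic client, such as an audio driver, would split one
 * DMA-mapped buffer into equal periods and get a callback per period:
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			4 * PERIOD_BYTES, PERIOD_BYTES, DMA_MEM_TO_DEV, NULL);
 *	txd->callback = period_elapsed;	// runs from atc_handle_cyclic()
 *	txd->callback_param = dev_priv;
 *	dmaengine_submit(txd);		// starts the ring in atc_tx_submit()
 */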
static int set_runtime_config(struct dma_chan *chan,
			      struct dma_slave_config *sconfig)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
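/*
 * Illustrative sketch (not compiled; the register address and widths are
 * assumptions): how a client fills struct dma_slave_config before the
 * DMA_SLAVE_CONFIG control call handled below lands here:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= (dma_addr_t)USART_THR_PHYS_ADDR,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst	= 1,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);	// reaches atc_control()
 */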
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_SLAVE_CONFIG) {
		return set_runtime_config(chan, (struct dma_slave_config *)arg);
	} else {
		return -ENXIO;
	}

	return 0;
}
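/*
 * Illustrative sketch (not compiled): the standard dmaengine wrappers that
 * funnel into atc_control() above via chan->device->device_control:
 *
 *	dmaengine_pause(chan);		// DMA_PAUSE: suspends the channel
 *	dmaengine_resume(chan);		// DMA_RESUME: resumes if paused
 *	dmaengine_terminate_all(chan);	// DMA_TERMINATE_ALL: flush everything
 */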
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		ret = dma_cookie_status(chan, cookie, txstate);
	}

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_residue(txstate, atc_first_active(atchan)->len);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}
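/*
 * Illustrative sketch (not compiled): a client polling a cookie through the
 * generic helper, which ends up in atc_tx_status() above:
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *	if (status == DMA_SUCCESS)
 *		;	// transfer done, buffers may be unmapped/reused
 */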
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan))
		atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif
static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};
static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}

	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};
static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};
static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);
static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");