/*
 * Copyright 2012 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/dma/mmp-pdma.h>

#include "dmaengine.h"
#define DCSR_RUN (1 << 31) /* Run Bit (read / write) */
#define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */
#define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */
#define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */
#define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */
#define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */
#define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */
#define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */
#define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */
#define DCSR_EORINTR (1 << 9) /* The end of Receive */
#define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \
		  (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */
#define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */

#define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
#define DDADR_STOP (1 << 0) /* Stop (read / write) */

#define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */
#define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */
#define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */
#define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */
#define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */
#define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */
#define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */
#define DCMD_BURST8 (1 << 16) /* 8 byte burst */
#define DCMD_BURST16 (2 << 16) /* 16 byte burst */
#define DCMD_BURST32 (3 << 16) /* 32 byte burst */
#define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
#define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
#define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
#define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT 3
#define PDMA_MAX_DESC_BYTES DCMD_LENGTH
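
/*
 * A single hardware descriptor can transfer at most DCMD_LENGTH
 * (8 KiB - 1) bytes, so the prep routines below split longer requests
 * across a chain of descriptors.
 */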
struct mmp_pdma_desc_hw {
	u32 ddadr; /* Points to the next descriptor + flags */
	u32 dsadr; /* DSADR value for the current transfer */
	u32 dtadr; /* DTADR value for the current transfer */
	u32 dcmd; /* DCMD value for the current transfer */
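	/*
	 * Note: the low four bits of ddadr carry flags (e.g. DDADR_STOP);
	 * the next-descriptor address itself is masked by DDADR_DESCADDR,
	 * so hardware descriptors are fetched from 16-byte aligned addresses.
	 */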
struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;

struct mmp_pdma_chan {
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;

	/* channel's basic info */
	struct tasklet_struct tasklet;

	spinlock_t desc_lock; /* Descriptor list lock */
	struct list_head chain_pending; /* Link descriptors queue for pending */
	struct list_head chain_running; /* Link descriptors queue for running */
	bool idle; /* channel state machine */
	struct dma_pool *desc_pool; /* Descriptors pool */

struct mmp_pdma_phy {
	struct mmp_pdma_chan *vchan;

struct mmp_pdma_device {
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	spinlock_t phy_lock; /* protect alloc/free phy channels */

#define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device)

static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr)
	u32 reg = (phy->idx << 4) + DDADR;

	writel(addr, phy->base + reg);

static void enable_chan(struct mmp_pdma_phy *phy)
	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | DCSR_RUN,

static void disable_chan(struct mmp_pdma_phy *phy)
	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) & ~DCSR_RUN,

static int clear_chan_irq(struct mmp_pdma_phy *phy)
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (dint & BIT(phy->idx)) {
		dcsr = readl(phy->base + reg);
		writel(dcsr, phy->base + reg);
		if ((dcsr & DCSR_BUSERR) && (phy->vchan))
			dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) == 0) {
		tasklet_schedule(&phy->vchan->tasklet);

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);

		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
/* look up a free phy channel, in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != ((i & 0xf) >> 2))

	spin_unlock_irqrestore(&pdev->phy_lock, flags);
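
/*
 * Detach a virtual channel from its physical channel: clear the DRCMR
 * mapping so the request line no longer targets this channel, then drop
 * the phy reference under the device's phy_lock.
 */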
static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->phy->vchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);

/* desc->tx_list ==> pending list */
static void append_pending_queue(struct mmp_pdma_chan *chan,
				 struct mmp_pdma_desc_sw *desc)
	struct mmp_pdma_desc_sw *tail =
		to_mmp_pdma_desc(chan->chain_pending.prev);

	if (list_empty(&chan->chain_pending))

	/* one irq per queue, even appended */
	tail->desc.ddadr = desc->async_tx.phys;
	tail->desc.dcmd &= ~DCMD_ENDIRQEN;

	/* softly link to pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
	struct mmp_pdma_desc_sw *desc;

	/* still in running, irq will start the pending list */
		dev_dbg(chan->dev, "DMA controller still busy\n");

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch phy channel with higher prio */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");

	chan->phy = lookup_phy(chan);
		dev_dbg(chan->dev, "no free dma channel\n");
	/* reinitialize pending list */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	set_desc(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);

	append_pending_queue(chan, desc);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
	struct mmp_pdma_desc_sw *desc;

	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
		dev_err(chan->dev, "out of memory for link descriptor\n");

	memset(desc, 0, sizeof(*desc));
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor has its own tx_submit */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

/**
 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
 *
 * This function will create a dma pool for descriptor allocation.
 * Request irq only when channel is requested
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

		dma_pool_create(dev_name(&dchan->dev->device), chan->dev,
				sizeof(struct mmp_pdma_desc_sw),
				__alignof__(struct mmp_pdma_desc_sw), 0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");

	mmp_pdma_free_phy(chan);

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	mmp_pdma_free_phy(chan);

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;

	chan = to_mmp_pdma_chan(dchan);

	chan->dir = DMA_MEM_TO_MEM;
	chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
	chan->dcmd |= DCMD_BURST32;

		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
			dev_err(chan->dev, "no memory for desc\n");

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		new->desc.dsadr = dma_src;
		new->desc.dtadr = dma_dst;

			prev->desc.ddadr = new->async_tx.phys;

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		if (chan->dir == DMA_MEM_TO_DEV) {
		} else if (chan->dir == DMA_DEV_TO_MEM) {
		} else if (chan->dir == DMA_MEM_TO_MEM) {

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->tx_list);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

	mmp_pdma_free_desc_list(chan, &first->tx_list);

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	struct scatterlist *sg;

	if ((sgl == NULL) || (sg_len == 0))

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
				dev_err(chan->dev, "no memory for desc\n");

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				new->desc.dsadr = addr;
				new->desc.dtadr = chan->dev_addr;
				new->desc.dsadr = chan->dev_addr;
				new->desc.dtadr = addr;

				prev->desc.ddadr = new->async_tx.phys;

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);

			/* Insert the link descriptor to the LD ring */
			list_add_tail(&new->node, &first->tx_list);

	/* update metadata */
	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	return &first->async_tx;

	mmp_pdma_free_desc_list(chan, &first->tx_list);

static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct dma_slave_config *cfg = (void *)arg;
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	case DMA_TERMINATE_ALL:
		disable_chan(chan->phy);
		mmp_pdma_free_phy(chan);
		spin_lock_irqsave(&chan->desc_lock, flags);
		mmp_pdma_free_desc_list(chan, &chan->chain_pending);
		mmp_pdma_free_desc_list(chan, &chan->chain_running);
		spin_unlock_irqrestore(&chan->desc_lock, flags);
	case DMA_SLAVE_CONFIG:
		if (cfg->direction == DMA_DEV_TO_MEM) {
			chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
			maxburst = cfg->src_maxburst;
			width = cfg->src_addr_width;
			addr = cfg->src_addr;
		} else if (cfg->direction == DMA_MEM_TO_DEV) {
			chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
			maxburst = cfg->dst_maxburst;
			width = cfg->dst_addr_width;
			addr = cfg->dst_addr;

		if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
			chan->dcmd |= DCMD_WIDTH1;
		else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
			chan->dcmd |= DCMD_WIDTH2;
		else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
			chan->dcmd |= DCMD_WIDTH4;

			chan->dcmd |= DCMD_BURST8;
		else if (maxburst == 16)
			chan->dcmd |= DCMD_BURST16;
		else if (maxburst == 32)
			chan->dcmd |= DCMD_BURST32;

		chan->dir = cfg->direction;
		chan->dev_addr = addr;
		/*
		 * FIXME: drivers should be ported over to use the filter
		 * function. Once that's done, the following two lines can
		 * be removed.
		 */
		chan->drcmr = cfg->slave_id;
static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie, struct dma_tx_state *txstate)
	return dma_cookie_status(dchan, cookie, txstate);

/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
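
/*
 * Tasklet: complete the cookie of the most recently finished descriptor,
 * move the finished chain to a temporary list, restart anything pending,
 * then run client callbacks and free descriptors with the lock dropped.
 */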
static void dma_do_tasklet(unsigned long data)
	struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data;
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);

	/* submit pending list; callback for each desc; free desc */

	spin_lock_irqsave(&chan->desc_lock, flags);

	/* update the cookie if we have some descriptors to cleanup */
	if (!list_empty(&chan->chain_running)) {

		desc = to_mmp_pdma_desc(chan->chain_running.prev);
		cookie = desc->async_tx.cookie;
		dma_cookie_complete(&desc->async_tx);

		dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);

	/*
	 * move the descriptors to a temporary list so we can drop the lock
	 * during the entire cleanup operation
	 */
	list_splice_tail_init(&chan->chain_running, &chain_cleanup);

	/* the hardware is now idle and ready for more */

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
			txd->callback(txd->callback_param);

		dma_pool_free(chan->desc_pool, desc, txd->phys);

static int mmp_pdma_remove(struct platform_device *op)
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);

	dma_async_device_unregister(&pdev->device);

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev,
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;

	chan = devm_kzalloc(pdev->dev,
			    sizeof(struct mmp_pdma_chan), GFP_KERNEL);

	phy->base = pdev->base;

		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy);
			dev_err(pdev->dev, "channel request irq fail!\n");

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register virt channel to dma engine */
	list_add_tail(&chan->chan.device_node,
		      &pdev->device.channels);

static struct of_device_id mmp_pdma_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate;

	/* walk the list of channels registered with the current instance and
	 * find one that is currently unused */
	list_for_each_entry(chan, &d->device.channels, device_node)
		if (chan->client_count == 0) {

	/* dma_get_slave_channel will return NULL if we lost a race between
	 * the lookup and the reservation */
	chan = dma_get_slave_channel(candidate);

		struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);
		c->drcmr = dma_spec->args[0];

static int mmp_pdma_probe(struct platform_device *op)
	struct mmp_pdma_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int dma_channels = 0, irq_num = 0;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);

	pdev->base = devm_ioremap_resource(pdev->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev);
		of_property_read_u32(pdev->dev->of_node,
				     "#dma-channels", &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
		dma_channels = 32; /* default to 32 channels */
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
	pdev->phy = devm_kzalloc(pdev->dev,
				 dma_channels * sizeof(struct mmp_pdma_phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all chan share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq,
				       mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev);

	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_control = mmp_pdma_control;
	pdev->device.copy_align = PDMA_ALIGNMENT;

	if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));
	ret = dma_async_device_register(&pdev->device);
		dev_err(pdev->device.dev, "unable to register\n");

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
			dev_err(&op->dev, "of_dma_controller_register failed\n");

	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);

static const struct platform_device_id mmp_pdma_id_table[] = {

static struct platform_driver mmp_pdma_driver = {
		.owner = THIS_MODULE,
		.of_match_table = mmp_pdma_dt_ids,
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,

bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param)
	struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan);

	if (chan->device->dev->driver != &mmp_pdma_driver.driver)

	c->drcmr = *(unsigned int *) param;

EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn);
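
/*
 * Illustrative sketch (not part of this driver): a client either gets a
 * channel through the device tree, handled by mmp_pdma_dma_xlate() above,
 * or, on non-DT platforms, through dma_request_channel() with the filter
 * function exported here. The DRCMR request line below is a made-up
 * example value.
 *
 *	dma_cap_mask_t mask;
 *	unsigned int drcmr = 5;		// hypothetical request line
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, mmp_pdma_filter_fn, &drcmr);
 */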
module_platform_driver(mmp_pdma_driver);
MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");