/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,		/* ready for preparation */
	DESC_PREPARED,		/* descriptor is prepared */
	DESC_SUBMITTED,		/* submitted to hardware */
	DESC_COMPLETED,		/* completed, have to call callback */
	DESC_WAITING,		/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)
/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no such slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;
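
/*
 * The bitmask itself is allocated in shdma_enter() below; a slave ID is
 * claimed with test_and_set_bit() in shdma_alloc_chan_resources() and
 * released again with clear_bit() in shdma_free_chan_resources(), so each
 * slave channel ID can be in use by at most one channel at a time.
 */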

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* Nothing to do if the channel is already busy */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx),
		*last = desc;
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	struct shdma_slave *slave = schan->slave;
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		/* Callback goes to the last chunk */
		chunk->async_tx.callback = NULL;
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);
		last = chunk;

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &last->async_tx, schan->id);
	}

	last->async_tx.callback = callback;
	last->async_tx.callback_param = tx->callback_param;

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0)
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .setup_xfer() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, slave);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with desc_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		if (slave->slave_id >= slave_num) {
			ret = -EINVAL;
			goto evalid;
		}
		if (test_and_set_bit(slave->slave_id, shdma_slave_used)) {
			ret = -EBUSY;
			goto etestused;
		}
		ret = ops->set_slave(schan, slave);
		if (ret < 0)
			goto esetslave;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;
	schan->slave = slave;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;
		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
etestused:
evalid:
	chan->private = NULL;
	return ret;
}
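
/*
 * A minimal client-side sketch (not part of this library) of how a slave
 * channel reaches the alloc_chan_resources() path above: the client attaches
 * a struct shdma_slave via chan->private from its dma_request_channel()
 * filter function. The filter and slave names are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *arg)
 *	{
 *		struct shdma_slave *slave = arg;
 *
 *		chan->private = slave;
 *		return true;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, &my_slave);
 */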

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	void *param = NULL;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			callback = tx->callback;
			param = tx->callback_param;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				/* Fall through */
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {
			/* Remove from ld_queue list */
			desc->mark = DESC_IDLE;

			list_move(&desc->node, &schan->ld_free);

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n",
					schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	if (callback)
		callback(param);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave) {
		/* The caller is holding dma_list_mutex */
		struct shdma_slave *slave = schan->slave;
		clear_bit(slave->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/*
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor on success or NULL on error
 * Locks: called with desc_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}

	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%u/%u)@%x -> %x with %p, cookie %d\n",
		copy_size, *len, *src, *dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY direction equals
 * DMA_MEM_TO_MEM and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what user is dealing with in all API calls, its
	 *	cookie is at first set to -EBUSY, at tx-submit to a positive
	 *	number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 *	only during this function, then they are immediately spliced
	 *	back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n",
				i, sg, len, (unsigned long long)sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so, they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}

static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags);
}
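
/*
 * A minimal client-side sketch (not part of this library) of how the MEMCPY
 * path above is exercised through the generic dmaengine API; "chan", "dst",
 * "src" and "len" are hypothetical client variables:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_CTRL_ACK);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);		ends up in shdma_tx_submit()
 *		dma_async_issue_pending(chan);		ends up in shdma_issue_pending()
 *	}
 */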

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_slave *slave = schan->slave;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (!slave || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: %p, %d, %d\n",
			 __func__, slave, sg_len, slave ? slave->slave_id : -1);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags);
}

static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	/* Only supports DMA_TERMINATE_ALL */
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	if (!chan)
		return -EINVAL;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);
	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find cookie on the queue, it has been aborted and we
	 * have to report error
	 */
	if (status != DMA_SUCCESS) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all descriptors: callbacks are called unlocked */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;
			sdesc->mark = DESC_IDLE;
			if (tx->callback)
				tx->callback(tx->callback_param);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);
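
/*
 * A hypothetical sketch (names assumed, not part of this library) of how a
 * glue driver might call shdma_reset() from its error interrupt handler after
 * quiescing the controller:
 *
 *	static irqreturn_t my_dmae_err_irq(int irq, void *data)
 *	{
 *		struct shdma_dev *sdev = data;
 *
 *		return IRQ_RETVAL(shdma_reset(sdev));
 *	}
 */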

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);
	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;
	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
		      unsigned long flags, const char *name)
{
	int ret = request_threaded_irq(irq, chan_irq, chan_irqt,
				       flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_free_irq(struct shdma_chan *schan)
{
	if (schan->irq >= 0)
		free_irq(schan->irq, schan);
}
EXPORT_SYMBOL(shdma_free_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor manage list */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[sdev->dma_dev.chancnt++] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);
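
/*
 * A minimal per-channel setup sketch for a glue driver, assuming a driver-
 * private "struct my_chan" that embeds struct shdma_chan (all "my_*" and
 * "MY_*" names are hypothetical):
 *
 *	struct my_chan *mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
 *
 *	mchan->shdma_chan.max_xfer_len = MY_MAX_XFER_LEN;
 *	shdma_chan_probe(&my_dev->shdma_dev, &mchan->shdma_chan, id);
 *	err = shdma_request_irq(&mchan->shdma_chan, irq, IRQF_SHARED,
 *				dev_name(dev));
 */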

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
	       int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_control = shdma_control;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
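
/*
 * Device-level usage sketch for a glue driver (all "my_*" names are
 * assumptions; the required struct shdma_ops callbacks are exactly the ones
 * checked in shdma_init() above):
 *
 *	static const struct shdma_ops my_shdma_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *		.chan_irq	= my_chan_irq,
 *	};
 *
 *	my_dev->shdma_dev.ops = &my_shdma_ops;
 *	my_dev->shdma_dev.desc_size = sizeof(struct my_desc);
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, MY_CHAN_NUM);
 *	if (!err)
 *		err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */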

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) *
				   sizeof(long), GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	kfree(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");