1 /*
2  * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on
3  * AVR32 systems.)
4  *
5  * Copyright (C) 2007-2008 Atmel Corporation
6  * Copyright (C) 2010-2011 ST Microelectronics
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 as
10  * published by the Free Software Foundation.
11  */
12 #include <linux/bitops.h>
13 #include <linux/clk.h>
14 #include <linux/delay.h>
15 #include <linux/dmaengine.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/init.h>
18 #include <linux/interrupt.h>
19 #include <linux/io.h>
20 #include <linux/of.h>
21 #include <linux/mm.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
25
26 #include "dw_dmac_regs.h"
27 #include "dmaengine.h"
28
29 /*
30  * This supports the Synopsys "DesignWare AHB Central DMA Controller",
31  * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
32  * of which use ARM any more).  See the "Databook" from Synopsys for
33  * information beyond what licensees probably provide.
34  *
35  * The driver has currently been tested only with the Atmel AT32AP7000,
36  * which does not support descriptor writeback.
37  */
38
39 #define DWC_DEFAULT_CTLLO(_chan) ({                             \
40                 struct dw_dma_slave *__slave = (_chan->private);        \
41                 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan);       \
42                 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \
43                 int _dms = __slave ? __slave->dst_master : 0;   \
44                 int _sms = __slave ? __slave->src_master : 1;   \
45                 u8 _smsize = __slave ? _sconfig->src_maxburst : \
46                         DW_DMA_MSIZE_16;                        \
47                 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \
48                         DW_DMA_MSIZE_16;                        \
49                                                                 \
50                 (DWC_CTLL_DST_MSIZE(_dmsize)                    \
51                  | DWC_CTLL_SRC_MSIZE(_smsize)                  \
52                  | DWC_CTLL_LLP_D_EN                            \
53                  | DWC_CTLL_LLP_S_EN                            \
54                  | DWC_CTLL_DMS(_dms)                           \
55                  | DWC_CTLL_SMS(_sms));                         \
56         })
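/*
 * Illustrative note (editor's sketch, not part of the driver): for a
 * channel with no slave data attached (chan->private == NULL), e.g. a
 * memcpy channel, the macro above effectively evaluates to
 *
 *	DWC_CTLL_DST_MSIZE(DW_DMA_MSIZE_16)
 *	 | DWC_CTLL_SRC_MSIZE(DW_DMA_MSIZE_16)
 *	 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN
 *	 | DWC_CTLL_DMS(0)	(destination on AHB master 0)
 *	 | DWC_CTLL_SMS(1)	(source on AHB master 1)
 *
 * whereas a slave channel takes the masters from its dw_dma_slave data
 * and the burst sizes from the channel's dma_slave_config.
 */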
57
58 /*
59  * This is configuration-dependent and usually a funny size like 4095.
60  *
61  * Note that this is a transfer count, i.e. if we transfer 32-bit
62  * words, we can do 16380 bytes per descriptor.
63  *
64  * This parameter is also system-specific.
65  */
66 #define DWC_MAX_COUNT   4095U
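/*
 * Worked example (editor's note): a 64 KiB transfer at 32-bit width is
 * 16384 transfers; with the 4095-transfer block limit above it must be
 * split into DIV_ROUND_UP(16384, 4095) = 5 linked-list items.
 */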
67
68 /*
69  * Number of descriptors to allocate for each channel. This should be
70  * made configurable somehow; preferably, the clients (at least the
71  * ones using slave transfers) should be able to give us a hint.
72  */
73 #define NR_DESCS_PER_CHANNEL    64
74
75 /*----------------------------------------------------------------------*/
76
77 /*
78  * Because we're not relying on writeback from the controller (it may not
79  * even be configured into the core!) we don't need to use dma_pool.  These
80  * descriptors -- and associated data -- are cacheable.  We do need to make
81  * sure their dcache entries are written back before handing them off to
82  * the controller, though.
83  */
84
85 static struct device *chan2dev(struct dma_chan *chan)
86 {
87         return &chan->dev->device;
88 }
89 static struct device *chan2parent(struct dma_chan *chan)
90 {
91         return chan->dev->device.parent;
92 }
93
94 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
95 {
96         return list_entry(dwc->active_list.next, struct dw_desc, desc_node);
97 }
98
99 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
100 {
101         struct dw_desc *desc, *_desc;
102         struct dw_desc *ret = NULL;
103         unsigned int i = 0;
104         unsigned long flags;
105
106         spin_lock_irqsave(&dwc->lock, flags);
107         list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
108                 if (async_tx_test_ack(&desc->txd)) {
109                         list_del(&desc->desc_node);
110                         ret = desc;
111                         break;
112                 }
113                 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
114                 i++;
115         }
116         spin_unlock_irqrestore(&dwc->lock, flags);
117
118         dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
119
120         return ret;
121 }
122
123 static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
124 {
125         struct dw_desc  *child;
126
127         list_for_each_entry(child, &desc->tx_list, desc_node)
128                 dma_sync_single_for_cpu(chan2parent(&dwc->chan),
129                                 child->txd.phys, sizeof(child->lli),
130                                 DMA_TO_DEVICE);
131         dma_sync_single_for_cpu(chan2parent(&dwc->chan),
132                         desc->txd.phys, sizeof(desc->lli),
133                         DMA_TO_DEVICE);
134 }
135
136 /*
137  * Move a descriptor, including any children, to the free list.
138  * `desc' must not be on any lists.
139  */
140 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
141 {
142         unsigned long flags;
143
144         if (desc) {
145                 struct dw_desc *child;
146
147                 dwc_sync_desc_for_cpu(dwc, desc);
148
149                 spin_lock_irqsave(&dwc->lock, flags);
150                 list_for_each_entry(child, &desc->tx_list, desc_node)
151                         dev_vdbg(chan2dev(&dwc->chan),
152                                         "moving child desc %p to freelist\n",
153                                         child);
154                 list_splice_init(&desc->tx_list, &dwc->free_list);
155                 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
156                 list_add(&desc->desc_node, &dwc->free_list);
157                 spin_unlock_irqrestore(&dwc->lock, flags);
158         }
159 }
160
161 static void dwc_initialize(struct dw_dma_chan *dwc)
162 {
163         struct dw_dma *dw = to_dw_dma(dwc->chan.device);
164         struct dw_dma_slave *dws = dwc->chan.private;
165         u32 cfghi = DWC_CFGH_FIFO_MODE;
166         u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
167
168         if (dwc->initialized)
169                 return;
170
171         if (dws) {
172                 /*
173                  * We need controller-specific data to set up slave
174                  * transfers.
175                  */
176                 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
177
178                 cfghi = dws->cfg_hi;
179                 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
180         }
181
182         channel_writel(dwc, CFG_LO, cfglo);
183         channel_writel(dwc, CFG_HI, cfghi);
184
185         /* Enable interrupts */
186         channel_set_bit(dw, MASK.XFER, dwc->mask);
187         channel_set_bit(dw, MASK.ERROR, dwc->mask);
188
189         dwc->initialized = true;
190 }
191
192 /*----------------------------------------------------------------------*/
193
194 /* Called with dwc->lock held and bh disabled */
195 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
196 {
197         struct dw_dma   *dw = to_dw_dma(dwc->chan.device);
198
199         /* ASSERT:  channel is idle */
200         if (dma_readl(dw, CH_EN) & dwc->mask) {
201                 dev_err(chan2dev(&dwc->chan),
202                         "BUG: Attempted to start non-idle channel\n");
203                 dev_err(chan2dev(&dwc->chan),
204                         "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
205                         channel_readl(dwc, SAR),
206                         channel_readl(dwc, DAR),
207                         channel_readl(dwc, LLP),
208                         channel_readl(dwc, CTL_HI),
209                         channel_readl(dwc, CTL_LO));
210
211                 /* The tasklet will hopefully advance the queue... */
212                 return;
213         }
214
215         dwc_initialize(dwc);
216
217         channel_writel(dwc, LLP, first->txd.phys);
218         channel_writel(dwc, CTL_LO,
219                         DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
220         channel_writel(dwc, CTL_HI, 0);
221         channel_set_bit(dw, CH_EN, dwc->mask);
222 }
223
224 /*----------------------------------------------------------------------*/
225
226 static void
227 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
228                 bool callback_required)
229 {
230         dma_async_tx_callback           callback = NULL;
231         void                            *param = NULL;
232         struct dma_async_tx_descriptor  *txd = &desc->txd;
233         struct dw_desc                  *child;
234         unsigned long                   flags;
235
236         dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
237
238         spin_lock_irqsave(&dwc->lock, flags);
239         dma_cookie_complete(txd);
240         if (callback_required) {
241                 callback = txd->callback;
242                 param = txd->callback_param;
243         }
244
245         dwc_sync_desc_for_cpu(dwc, desc);
246
247         /* async_tx_ack */
248         list_for_each_entry(child, &desc->tx_list, desc_node)
249                 async_tx_ack(&child->txd);
250         async_tx_ack(&desc->txd);
251
252         list_splice_init(&desc->tx_list, &dwc->free_list);
253         list_move(&desc->desc_node, &dwc->free_list);
254
255         if (!dwc->chan.private) {
256                 struct device *parent = chan2parent(&dwc->chan);
257                 if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
258                         if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
259                                 dma_unmap_single(parent, desc->lli.dar,
260                                                 desc->len, DMA_FROM_DEVICE);
261                         else
262                                 dma_unmap_page(parent, desc->lli.dar,
263                                                 desc->len, DMA_FROM_DEVICE);
264                 }
265                 if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
266                         if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
267                                 dma_unmap_single(parent, desc->lli.sar,
268                                                 desc->len, DMA_TO_DEVICE);
269                         else
270                                 dma_unmap_page(parent, desc->lli.sar,
271                                                 desc->len, DMA_TO_DEVICE);
272                 }
273         }
274
275         spin_unlock_irqrestore(&dwc->lock, flags);
276
277         if (callback_required && callback)
278                 callback(param);
279 }
280
281 static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
282 {
283         struct dw_desc *desc, *_desc;
284         LIST_HEAD(list);
285         unsigned long flags;
286
287         spin_lock_irqsave(&dwc->lock, flags);
288         if (dma_readl(dw, CH_EN) & dwc->mask) {
289                 dev_err(chan2dev(&dwc->chan),
290                         "BUG: XFER bit set, but channel not idle!\n");
291
292                 /* Try to continue after resetting the channel... */
293                 channel_clear_bit(dw, CH_EN, dwc->mask);
294                 while (dma_readl(dw, CH_EN) & dwc->mask)
295                         cpu_relax();
296         }
297
298         /*
299          * Submit queued descriptors ASAP, i.e. before we go through
300          * the completed ones.
301          */
302         list_splice_init(&dwc->active_list, &list);
303         if (!list_empty(&dwc->queue)) {
304                 list_move(dwc->queue.next, &dwc->active_list);
305                 dwc_dostart(dwc, dwc_first_active(dwc));
306         }
307
308         spin_unlock_irqrestore(&dwc->lock, flags);
309
310         list_for_each_entry_safe(desc, _desc, &list, desc_node)
311                 dwc_descriptor_complete(dwc, desc, true);
312 }
313
314 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
315 {
316         dma_addr_t llp;
317         struct dw_desc *desc, *_desc;
318         struct dw_desc *child;
319         u32 status_xfer;
320         unsigned long flags;
321
322         spin_lock_irqsave(&dwc->lock, flags);
323         llp = channel_readl(dwc, LLP);
324         status_xfer = dma_readl(dw, RAW.XFER);
325
326         if (status_xfer & dwc->mask) {
327                 /* Everything we've submitted is done */
328                 dma_writel(dw, CLEAR.XFER, dwc->mask);
329                 spin_unlock_irqrestore(&dwc->lock, flags);
330
331                 dwc_complete_all(dw, dwc);
332                 return;
333         }
334
335         if (list_empty(&dwc->active_list)) {
336                 spin_unlock_irqrestore(&dwc->lock, flags);
337                 return;
338         }
339
340         dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
341
342         list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
343                 /* check first descriptor's addr */
344                 if (desc->txd.phys == llp) {
345                         spin_unlock_irqrestore(&dwc->lock, flags);
346                         return;
347                 }
348
349                 /* check first descriptor's llp */
350                 if (desc->lli.llp == llp) {
351                         /* This one is currently in progress */
352                         spin_unlock_irqrestore(&dwc->lock, flags);
353                         return;
354                 }
355
356                 list_for_each_entry(child, &desc->tx_list, desc_node)
357                         if (child->lli.llp == llp) {
358                                 /* Currently in progress */
359                                 spin_unlock_irqrestore(&dwc->lock, flags);
360                                 return;
361                         }
362
363                 /*
364                  * No descriptors so far seem to be in progress, i.e.
365                  * this one must be done.
366                  */
367                 spin_unlock_irqrestore(&dwc->lock, flags);
368                 dwc_descriptor_complete(dwc, desc, true);
369                 spin_lock_irqsave(&dwc->lock, flags);
370         }
371
372         dev_err(chan2dev(&dwc->chan),
373                 "BUG: All descriptors done, but channel not idle!\n");
374
375         /* Try to continue after resetting the channel... */
376         channel_clear_bit(dw, CH_EN, dwc->mask);
377         while (dma_readl(dw, CH_EN) & dwc->mask)
378                 cpu_relax();
379
380         if (!list_empty(&dwc->queue)) {
381                 list_move(dwc->queue.next, &dwc->active_list);
382                 dwc_dostart(dwc, dwc_first_active(dwc));
383         }
384         spin_unlock_irqrestore(&dwc->lock, flags);
385 }
386
387 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
388 {
389         dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
390                         "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
391                         lli->sar, lli->dar, lli->llp,
392                         lli->ctlhi, lli->ctllo);
393 }
394
395 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
396 {
397         struct dw_desc *bad_desc;
398         struct dw_desc *child;
399         unsigned long flags;
400
401         dwc_scan_descriptors(dw, dwc);
402
403         spin_lock_irqsave(&dwc->lock, flags);
404
405         /*
406          * The descriptor currently at the head of the active list is
407          * borked. Since we don't have any way to report errors, we'll
408          * just have to scream loudly and try to carry on.
409          */
410         bad_desc = dwc_first_active(dwc);
411         list_del_init(&bad_desc->desc_node);
412         list_move(dwc->queue.next, dwc->active_list.prev);
413
414         /* Clear the error flag and try to restart the controller */
415         dma_writel(dw, CLEAR.ERROR, dwc->mask);
416         if (!list_empty(&dwc->active_list))
417                 dwc_dostart(dwc, dwc_first_active(dwc));
418
419         /*
420          * KERN_CRIT may seem harsh, but since this only happens
421          * when someone submits a bad physical address in a
422          * descriptor, we should consider ourselves lucky that the
423          * controller flagged an error instead of scribbling over
424          * random memory locations.
425          */
426         dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
427                         "Bad descriptor submitted for DMA!\n");
428         dev_printk(KERN_CRIT, chan2dev(&dwc->chan),
429                         "  cookie: %d\n", bad_desc->txd.cookie);
430         dwc_dump_lli(dwc, &bad_desc->lli);
431         list_for_each_entry(child, &bad_desc->tx_list, desc_node)
432                 dwc_dump_lli(dwc, &child->lli);
433
434         spin_unlock_irqrestore(&dwc->lock, flags);
435
436         /* Pretend the descriptor completed successfully */
437         dwc_descriptor_complete(dwc, bad_desc, true);
438 }
439
440 /* --------------------- Cyclic DMA API extensions -------------------- */
441
442 inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan)
443 {
444         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
445         return channel_readl(dwc, SAR);
446 }
447 EXPORT_SYMBOL(dw_dma_get_src_addr);
448
449 inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan)
450 {
451         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
452         return channel_readl(dwc, DAR);
453 }
454 EXPORT_SYMBOL(dw_dma_get_dst_addr);
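/*
 * Usage sketch (editor's illustration): with a cyclic transfer set up by
 * dw_dma_cyclic_prep() below, a client can derive its current position
 * inside the ring from these helpers, e.g. for a DMA_DEV_TO_MEM ring
 * that starts at buf_addr (a placeholder for the client's own buffer
 * address):
 *
 *	size_t pos = dw_dma_get_dst_addr(chan) - buf_addr;
 *
 * and analogously via dw_dma_get_src_addr() for DMA_MEM_TO_DEV.
 */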
455
456 /* called with dwc->lock held and all DMAC interrupts disabled */
457 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
458                 u32 status_err, u32 status_xfer)
459 {
460         unsigned long flags;
461
462         if (dwc->mask) {
463                 void (*callback)(void *param);
464                 void *callback_param;
465
466                 dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
467                                 channel_readl(dwc, LLP));
468
469                 callback = dwc->cdesc->period_callback;
470                 callback_param = dwc->cdesc->period_callback_param;
471
472                 if (callback)
473                         callback(callback_param);
474         }
475
476         /*
477          * Error and transfer complete are highly unlikely, and will most
478          * likely be due to a configuration error by the user.
479          */
480         if (unlikely(status_err & dwc->mask) ||
481                         unlikely(status_xfer & dwc->mask)) {
482                 int i;
483
484                 dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
485                                 "interrupt, stopping DMA transfer\n",
486                                 status_xfer ? "xfer" : "error");
487
488                 spin_lock_irqsave(&dwc->lock, flags);
489
490                 dev_err(chan2dev(&dwc->chan),
491                         "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
492                         channel_readl(dwc, SAR),
493                         channel_readl(dwc, DAR),
494                         channel_readl(dwc, LLP),
495                         channel_readl(dwc, CTL_HI),
496                         channel_readl(dwc, CTL_LO));
497
498                 channel_clear_bit(dw, CH_EN, dwc->mask);
499                 while (dma_readl(dw, CH_EN) & dwc->mask)
500                         cpu_relax();
501
502                 /* make sure DMA does not restart by loading a new list */
503                 channel_writel(dwc, LLP, 0);
504                 channel_writel(dwc, CTL_LO, 0);
505                 channel_writel(dwc, CTL_HI, 0);
506
507                 dma_writel(dw, CLEAR.ERROR, dwc->mask);
508                 dma_writel(dw, CLEAR.XFER, dwc->mask);
509
510                 for (i = 0; i < dwc->cdesc->periods; i++)
511                         dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
512
513                 spin_unlock_irqrestore(&dwc->lock, flags);
514         }
515 }
516
517 /* ------------------------------------------------------------------------- */
518
519 static void dw_dma_tasklet(unsigned long data)
520 {
521         struct dw_dma *dw = (struct dw_dma *)data;
522         struct dw_dma_chan *dwc;
523         u32 status_xfer;
524         u32 status_err;
525         int i;
526
527         status_xfer = dma_readl(dw, RAW.XFER);
528         status_err = dma_readl(dw, RAW.ERROR);
529
530         dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err);
531
532         for (i = 0; i < dw->dma.chancnt; i++) {
533                 dwc = &dw->chan[i];
534                 if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
535                         dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
536                 else if (status_err & (1 << i))
537                         dwc_handle_error(dw, dwc);
538                 else if (status_xfer & (1 << i))
539                         dwc_scan_descriptors(dw, dwc);
540         }
541
542         /*
543          * Re-enable interrupts.
544          */
545         channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
546         channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
547 }
548
549 static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
550 {
551         struct dw_dma *dw = dev_id;
552         u32 status;
553
554         dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n",
555                         dma_readl(dw, STATUS_INT));
556
557         /*
558          * Just disable the interrupts. We'll turn them back on in the
559          * softirq handler.
560          */
561         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
562         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
563
564         status = dma_readl(dw, STATUS_INT);
565         if (status) {
566                 dev_err(dw->dma.dev,
567                         "BUG: Unexpected interrupts pending: 0x%x\n",
568                         status);
569
570                 /* Try to recover */
571                 channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
572                 channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
573                 channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
574                 channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
575         }
576
577         tasklet_schedule(&dw->tasklet);
578
579         return IRQ_HANDLED;
580 }
581
582 /*----------------------------------------------------------------------*/
583
584 static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
585 {
586         struct dw_desc          *desc = txd_to_dw_desc(tx);
587         struct dw_dma_chan      *dwc = to_dw_dma_chan(tx->chan);
588         dma_cookie_t            cookie;
589         unsigned long           flags;
590
591         spin_lock_irqsave(&dwc->lock, flags);
592         cookie = dma_cookie_assign(tx);
593
594         /*
595          * REVISIT: We should attempt to chain as many descriptors as
596          * possible, perhaps even appending to those already submitted
597          * for DMA. But this is hard to do in a race-free manner.
598          */
599         if (list_empty(&dwc->active_list)) {
600                 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
601                                 desc->txd.cookie);
602                 list_add_tail(&desc->desc_node, &dwc->active_list);
603                 dwc_dostart(dwc, dwc_first_active(dwc));
604         } else {
605                 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
606                                 desc->txd.cookie);
607
608                 list_add_tail(&desc->desc_node, &dwc->queue);
609         }
610
611         spin_unlock_irqrestore(&dwc->lock, flags);
612
613         return cookie;
614 }
615
616 static struct dma_async_tx_descriptor *
617 dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
618                 size_t len, unsigned long flags)
619 {
620         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
621         struct dw_desc          *desc;
622         struct dw_desc          *first;
623         struct dw_desc          *prev;
624         size_t                  xfer_count;
625         size_t                  offset;
626         unsigned int            src_width;
627         unsigned int            dst_width;
628         u32                     ctllo;
629
630         dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n",
631                         dest, src, len, flags);
632
633         if (unlikely(!len)) {
634                 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
635                 return NULL;
636         }
637
638         /*
639          * We can be a lot more clever here, but this should take care
640          * of the most common optimization.
641          */
642         if (!((src | dest | len) & 7))
643                 src_width = dst_width = 3;
644         else if (!((src | dest | len) & 3))
645                 src_width = dst_width = 2;
646         else if (!((src | dest | len) & 1))
647                 src_width = dst_width = 1;
648         else
649                 src_width = dst_width = 0;
650
651         ctllo = DWC_DEFAULT_CTLLO(chan)
652                         | DWC_CTLL_DST_WIDTH(dst_width)
653                         | DWC_CTLL_SRC_WIDTH(src_width)
654                         | DWC_CTLL_DST_INC
655                         | DWC_CTLL_SRC_INC
656                         | DWC_CTLL_FC_M2M;
657         prev = first = NULL;
658
659         for (offset = 0; offset < len; offset += xfer_count << src_width) {
660                 xfer_count = min_t(size_t, (len - offset) >> src_width,
661                                 DWC_MAX_COUNT);
662
663                 desc = dwc_desc_get(dwc);
664                 if (!desc)
665                         goto err_desc_get;
666
667                 desc->lli.sar = src + offset;
668                 desc->lli.dar = dest + offset;
669                 desc->lli.ctllo = ctllo;
670                 desc->lli.ctlhi = xfer_count;
671
672                 if (!first) {
673                         first = desc;
674                 } else {
675                         prev->lli.llp = desc->txd.phys;
676                         dma_sync_single_for_device(chan2parent(chan),
677                                         prev->txd.phys, sizeof(prev->lli),
678                                         DMA_TO_DEVICE);
679                         list_add_tail(&desc->desc_node,
680                                         &first->tx_list);
681                 }
682                 prev = desc;
683         }
684
685
686         if (flags & DMA_PREP_INTERRUPT)
687                 /* Trigger interrupt after last block */
688                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
689
690         prev->lli.llp = 0;
691         dma_sync_single_for_device(chan2parent(chan),
692                         prev->txd.phys, sizeof(prev->lli),
693                         DMA_TO_DEVICE);
694
695         first->txd.flags = flags;
696         first->len = len;
697
698         return &first->txd;
699
700 err_desc_get:
701         dwc_desc_put(dwc, first);
702         return NULL;
703 }
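/*
 * Usage sketch (editor's example, not taken from this driver): a client
 * reaches dwc_prep_dma_memcpy() above through the channel's
 * device_prep_dma_memcpy hook; chan, dst, src and len are placeholders
 * supplied by the caller:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * Completion is then reported via tx->callback or can be polled with
 * dma_async_is_tx_complete().
 */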
704
705 static struct dma_async_tx_descriptor *
706 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
707                 unsigned int sg_len, enum dma_transfer_direction direction,
708                 unsigned long flags, void *context)
709 {
710         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
711         struct dw_dma_slave     *dws = chan->private;
712         struct dma_slave_config *sconfig = &dwc->dma_sconfig;
713         struct dw_desc          *prev;
714         struct dw_desc          *first;
715         u32                     ctllo;
716         dma_addr_t              reg;
717         unsigned int            reg_width;
718         unsigned int            mem_width;
719         unsigned int            i;
720         struct scatterlist      *sg;
721         size_t                  total_len = 0;
722
723         dev_vdbg(chan2dev(chan), "prep_dma_slave\n");
724
725         if (unlikely(!dws || !sg_len))
726                 return NULL;
727
728         prev = first = NULL;
729
730         switch (direction) {
731         case DMA_MEM_TO_DEV:
732                 reg_width = __fls(sconfig->dst_addr_width);
733                 reg = sconfig->dst_addr;
734                 ctllo = (DWC_DEFAULT_CTLLO(chan)
735                                 | DWC_CTLL_DST_WIDTH(reg_width)
736                                 | DWC_CTLL_DST_FIX
737                                 | DWC_CTLL_SRC_INC);
738
739                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
740                         DWC_CTLL_FC(DW_DMA_FC_D_M2P);
741
742                 for_each_sg(sgl, sg, sg_len, i) {
743                         struct dw_desc  *desc;
744                         u32             len, dlen, mem;
745
746                         mem = sg_dma_address(sg);
747                         len = sg_dma_len(sg);
748
749                         if (!((mem | len) & 7))
750                                 mem_width = 3;
751                         else if (!((mem | len) & 3))
752                                 mem_width = 2;
753                         else if (!((mem | len) & 1))
754                                 mem_width = 1;
755                         else
756                                 mem_width = 0;
757
758 slave_sg_todev_fill_desc:
759                         desc = dwc_desc_get(dwc);
760                         if (!desc) {
761                                 dev_err(chan2dev(chan),
762                                         "not enough descriptors available\n");
763                                 goto err_desc_get;
764                         }
765
766                         desc->lli.sar = mem;
767                         desc->lli.dar = reg;
768                         desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
769                         if ((len >> mem_width) > DWC_MAX_COUNT) {
770                                 dlen = DWC_MAX_COUNT << mem_width;
771                                 mem += dlen;
772                                 len -= dlen;
773                         } else {
774                                 dlen = len;
775                                 len = 0;
776                         }
777
778                         desc->lli.ctlhi = dlen >> mem_width;
779
780                         if (!first) {
781                                 first = desc;
782                         } else {
783                                 prev->lli.llp = desc->txd.phys;
784                                 dma_sync_single_for_device(chan2parent(chan),
785                                                 prev->txd.phys,
786                                                 sizeof(prev->lli),
787                                                 DMA_TO_DEVICE);
788                                 list_add_tail(&desc->desc_node,
789                                                 &first->tx_list);
790                         }
791                         prev = desc;
792                         total_len += dlen;
793
794                         if (len)
795                                 goto slave_sg_todev_fill_desc;
796                 }
797                 break;
798         case DMA_DEV_TO_MEM:
799                 reg_width = __fls(sconfig->src_addr_width);
800                 reg = sconfig->src_addr;
801                 ctllo = (DWC_DEFAULT_CTLLO(chan)
802                                 | DWC_CTLL_SRC_WIDTH(reg_width)
803                                 | DWC_CTLL_DST_INC
804                                 | DWC_CTLL_SRC_FIX);
805
806                 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
807                         DWC_CTLL_FC(DW_DMA_FC_D_P2M);
808
809                 for_each_sg(sgl, sg, sg_len, i) {
810                         struct dw_desc  *desc;
811                         u32             len, dlen, mem;
812
813                         mem = sg_dma_address(sg);
814                         len = sg_dma_len(sg);
815
816                         if (!((mem | len) & 7))
817                                 mem_width = 3;
818                         else if (!((mem | len) & 3))
819                                 mem_width = 2;
820                         else if (!((mem | len) & 1))
821                                 mem_width = 1;
822                         else
823                                 mem_width = 0;
824
825 slave_sg_fromdev_fill_desc:
826                         desc = dwc_desc_get(dwc);
827                         if (!desc) {
828                                 dev_err(chan2dev(chan),
829                                                 "not enough descriptors available\n");
830                                 goto err_desc_get;
831                         }
832
833                         desc->lli.sar = reg;
834                         desc->lli.dar = mem;
835                         desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
836                         if ((len >> reg_width) > DWC_MAX_COUNT) {
837                                 dlen = DWC_MAX_COUNT << reg_width;
838                                 mem += dlen;
839                                 len -= dlen;
840                         } else {
841                                 dlen = len;
842                                 len = 0;
843                         }
844                         desc->lli.ctlhi = dlen >> reg_width;
845
846                         if (!first) {
847                                 first = desc;
848                         } else {
849                                 prev->lli.llp = desc->txd.phys;
850                                 dma_sync_single_for_device(chan2parent(chan),
851                                                 prev->txd.phys,
852                                                 sizeof(prev->lli),
853                                                 DMA_TO_DEVICE);
854                                 list_add_tail(&desc->desc_node,
855                                                 &first->tx_list);
856                         }
857                         prev = desc;
858                         total_len += dlen;
859
860                         if (len)
861                                 goto slave_sg_fromdev_fill_desc;
862                 }
863                 break;
864         default:
865                 return NULL;
866         }
867
868         if (flags & DMA_PREP_INTERRUPT)
869                 /* Trigger interrupt after last block */
870                 prev->lli.ctllo |= DWC_CTLL_INT_EN;
871
872         prev->lli.llp = 0;
873         dma_sync_single_for_device(chan2parent(chan),
874                         prev->txd.phys, sizeof(prev->lli),
875                         DMA_TO_DEVICE);
876
877         first->len = total_len;
878
879         return &first->txd;
880
881 err_desc_get:
882         dwc_desc_put(dwc, first);
883         return NULL;
884 }
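/*
 * Usage sketch (editor's example): a slave client that has attached its
 * dw_dma_slave data via chan->private and configured the channel (see
 * set_runtime_config() below) would typically submit a DMA-mapped
 * scatterlist like this; sgl, sg_len, my_done_callback and my_data are
 * placeholders:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -EBUSY;
 *	tx->callback = my_done_callback;
 *	tx->callback_param = my_data;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */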
885
886 /*
887  * Fix sconfig's burst size according to what dw_dmac expects, i.e. map
888  * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
889  *
890  * NOTE: burst size 2 is not supported by the controller.
891  *
892  * This is done with fls(): for maxburst > 1 the encoding is fls(maxburst) - 2.
893  */
894 static inline void convert_burst(u32 *maxburst)
895 {
896         if (*maxburst > 1)
897                 *maxburst = fls(*maxburst) - 2;
898         else
899                 *maxburst = 0;
900 }
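/*
 * Editor's note, worked through for the function above: fls(4) = 3,
 * fls(8) = 4 and fls(16) = 5, so maxburst values of 4, 8 and 16 are
 * encoded as 1, 2 and 3, while a maxburst of 1 (or 0) becomes 0.
 */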
901
902 static int
903 set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
904 {
905         struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
906
907         /* Check if chan is configured for slave transfers */
908         if (!chan->private)
909                 return -EINVAL;
910
911         memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
912
913         convert_burst(&dwc->dma_sconfig.src_maxburst);
914         convert_burst(&dwc->dma_sconfig.dst_maxburst);
915
916         return 0;
917 }
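/*
 * Usage sketch (editor's example): a slave client normally fills in a
 * dma_slave_config and passes it down to set_runtime_config() above via
 * the DMA_SLAVE_CONFIG command; the peripheral address and widths below
 * are placeholders:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= per_base + TX_FIFO_OFFSET,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Note that convert_burst() rewrites the maxburst fields in place, so
 * the 16 above ends up stored as the encoded value 3.
 */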
918
919 static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
920                        unsigned long arg)
921 {
922         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
923         struct dw_dma           *dw = to_dw_dma(chan->device);
924         struct dw_desc          *desc, *_desc;
925         unsigned long           flags;
926         u32                     cfglo;
927         LIST_HEAD(list);
928
929         if (cmd == DMA_PAUSE) {
930                 spin_lock_irqsave(&dwc->lock, flags);
931
932                 cfglo = channel_readl(dwc, CFG_LO);
933                 channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
934                 while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
935                         cpu_relax();
936
937                 dwc->paused = true;
938                 spin_unlock_irqrestore(&dwc->lock, flags);
939         } else if (cmd == DMA_RESUME) {
940                 if (!dwc->paused)
941                         return 0;
942
943                 spin_lock_irqsave(&dwc->lock, flags);
944
945                 cfglo = channel_readl(dwc, CFG_LO);
946                 channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
947                 dwc->paused = false;
948
949                 spin_unlock_irqrestore(&dwc->lock, flags);
950         } else if (cmd == DMA_TERMINATE_ALL) {
951                 spin_lock_irqsave(&dwc->lock, flags);
952
953                 channel_clear_bit(dw, CH_EN, dwc->mask);
954                 while (dma_readl(dw, CH_EN) & dwc->mask)
955                         cpu_relax();
956
957                 dwc->paused = false;
958
959                 /* active_list entries will end up before queued entries */
960                 list_splice_init(&dwc->queue, &list);
961                 list_splice_init(&dwc->active_list, &list);
962
963                 spin_unlock_irqrestore(&dwc->lock, flags);
964
965                 /* Flush all pending and queued descriptors */
966                 list_for_each_entry_safe(desc, _desc, &list, desc_node)
967                         dwc_descriptor_complete(dwc, desc, false);
968         } else if (cmd == DMA_SLAVE_CONFIG) {
969                 return set_runtime_config(chan, (struct dma_slave_config *)arg);
970         } else {
971                 return -ENXIO;
972         }
973
974         return 0;
975 }
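/*
 * Usage sketch (editor's example): the commands handled above are
 * normally issued through the generic inline wrappers rather than by
 * calling device_control() directly:
 *
 *	dmaengine_pause(chan);		maps to DMA_PAUSE
 *	dmaengine_resume(chan);		maps to DMA_RESUME
 *	dmaengine_terminate_all(chan);	maps to DMA_TERMINATE_ALL
 *
 * Note that DMA_TERMINATE_ALL completes the flushed descriptors with
 * callback_required == false, so client callbacks are not invoked for
 * aborted transfers.
 */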
976
977 static enum dma_status
978 dwc_tx_status(struct dma_chan *chan,
979               dma_cookie_t cookie,
980               struct dma_tx_state *txstate)
981 {
982         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
983         enum dma_status         ret;
984
985         ret = dma_cookie_status(chan, cookie, txstate);
986         if (ret != DMA_SUCCESS) {
987                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
988
989                 ret = dma_cookie_status(chan, cookie, txstate);
990         }
991
992         if (ret != DMA_SUCCESS)
993                 dma_set_residue(txstate, dwc_first_active(dwc)->len);
994
995         if (dwc->paused)
996                 return DMA_PAUSED;
997
998         return ret;
999 }
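/*
 * Usage sketch (editor's example): a client polling a previously
 * submitted cookie goes through the generic helper, which ends up in
 * dwc_tx_status() above:
 *
 *	enum dma_status status;
 *
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 *
 * This returns DMA_SUCCESS once dwc_scan_descriptors() has retired the
 * descriptor, DMA_PAUSED while the channel is suspended, and
 * DMA_IN_PROGRESS otherwise.
 */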
1000
1001 static void dwc_issue_pending(struct dma_chan *chan)
1002 {
1003         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1004
1005         if (!list_empty(&dwc->queue))
1006                 dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
1007 }
1008
1009 static int dwc_alloc_chan_resources(struct dma_chan *chan)
1010 {
1011         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1012         struct dw_dma           *dw = to_dw_dma(chan->device);
1013         struct dw_desc          *desc;
1014         int                     i;
1015         unsigned long           flags;
1016
1017         dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1018
1019         /* ASSERT:  channel is idle */
1020         if (dma_readl(dw, CH_EN) & dwc->mask) {
1021                 dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
1022                 return -EIO;
1023         }
1024
1025         dma_cookie_init(chan);
1026
1027         /*
1028          * NOTE: some controllers may have additional features that we
1029          * need to initialize here, like "scatter-gather" (which
1030          * doesn't mean what you think it means), and status writeback.
1031          */
1032
1033         spin_lock_irqsave(&dwc->lock, flags);
1034         i = dwc->descs_allocated;
1035         while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
1036                 spin_unlock_irqrestore(&dwc->lock, flags);
1037
1038                 desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
1039                 if (!desc) {
1040                         dev_info(chan2dev(chan),
1041                                 "only allocated %d descriptors\n", i);
1042                         spin_lock_irqsave(&dwc->lock, flags);
1043                         break;
1044                 }
1045
1046                 INIT_LIST_HEAD(&desc->tx_list);
1047                 dma_async_tx_descriptor_init(&desc->txd, chan);
1048                 desc->txd.tx_submit = dwc_tx_submit;
1049                 desc->txd.flags = DMA_CTRL_ACK;
1050                 desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli,
1051                                 sizeof(desc->lli), DMA_TO_DEVICE);
1052                 dwc_desc_put(dwc, desc);
1053
1054                 spin_lock_irqsave(&dwc->lock, flags);
1055                 i = ++dwc->descs_allocated;
1056         }
1057
1058         spin_unlock_irqrestore(&dwc->lock, flags);
1059
1060         dev_dbg(chan2dev(chan),
1061                 "alloc_chan_resources allocated %d descriptors\n", i);
1062
1063         return i;
1064 }
1065
1066 static void dwc_free_chan_resources(struct dma_chan *chan)
1067 {
1068         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1069         struct dw_dma           *dw = to_dw_dma(chan->device);
1070         struct dw_desc          *desc, *_desc;
1071         unsigned long           flags;
1072         LIST_HEAD(list);
1073
1074         dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
1075                         dwc->descs_allocated);
1076
1077         /* ASSERT:  channel is idle */
1078         BUG_ON(!list_empty(&dwc->active_list));
1079         BUG_ON(!list_empty(&dwc->queue));
1080         BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
1081
1082         spin_lock_irqsave(&dwc->lock, flags);
1083         list_splice_init(&dwc->free_list, &list);
1084         dwc->descs_allocated = 0;
1085         dwc->initialized = false;
1086
1087         /* Disable interrupts */
1088         channel_clear_bit(dw, MASK.XFER, dwc->mask);
1089         channel_clear_bit(dw, MASK.ERROR, dwc->mask);
1090
1091         spin_unlock_irqrestore(&dwc->lock, flags);
1092
1093         list_for_each_entry_safe(desc, _desc, &list, desc_node) {
1094                 dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
1095                 dma_unmap_single(chan2parent(chan), desc->txd.phys,
1096                                 sizeof(desc->lli), DMA_TO_DEVICE);
1097                 kfree(desc);
1098         }
1099
1100         dev_vdbg(chan2dev(chan), "free_chan_resources done\n");
1101 }
1102
1103 /* --------------------- Cyclic DMA API extensions -------------------- */
1104
1105 /**
1106  * dw_dma_cyclic_start - start the cyclic DMA transfer
1107  * @chan: the DMA channel to start
1108  *
1109  * Must be called with soft interrupts disabled. Returns zero on success or
1110  * -errno on failure.
1111  */
1112 int dw_dma_cyclic_start(struct dma_chan *chan)
1113 {
1114         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1115         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1116         unsigned long           flags;
1117
1118         if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
1119                 dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
1120                 return -ENODEV;
1121         }
1122
1123         spin_lock_irqsave(&dwc->lock, flags);
1124
1125         /* assert channel is idle */
1126         if (dma_readl(dw, CH_EN) & dwc->mask) {
1127                 dev_err(chan2dev(&dwc->chan),
1128                         "BUG: Attempted to start non-idle channel\n");
1129                 dev_err(chan2dev(&dwc->chan),
1130                         "  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
1131                         channel_readl(dwc, SAR),
1132                         channel_readl(dwc, DAR),
1133                         channel_readl(dwc, LLP),
1134                         channel_readl(dwc, CTL_HI),
1135                         channel_readl(dwc, CTL_LO));
1136                 spin_unlock_irqrestore(&dwc->lock, flags);
1137                 return -EBUSY;
1138         }
1139
1140         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1141         dma_writel(dw, CLEAR.XFER, dwc->mask);
1142
1143         /* setup DMAC channel registers */
1144         channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
1145         channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
1146         channel_writel(dwc, CTL_HI, 0);
1147
1148         channel_set_bit(dw, CH_EN, dwc->mask);
1149
1150         spin_unlock_irqrestore(&dwc->lock, flags);
1151
1152         return 0;
1153 }
1154 EXPORT_SYMBOL(dw_dma_cyclic_start);
1155
1156 /**
1157  * dw_dma_cyclic_stop - stop the cyclic DMA transfer
1158  * @chan: the DMA channel to stop
1159  *
1160  * Must be called with soft interrupts disabled.
1161  */
1162 void dw_dma_cyclic_stop(struct dma_chan *chan)
1163 {
1164         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1165         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1166         unsigned long           flags;
1167
1168         spin_lock_irqsave(&dwc->lock, flags);
1169
1170         channel_clear_bit(dw, CH_EN, dwc->mask);
1171         while (dma_readl(dw, CH_EN) & dwc->mask)
1172                 cpu_relax();
1173
1174         spin_unlock_irqrestore(&dwc->lock, flags);
1175 }
1176 EXPORT_SYMBOL(dw_dma_cyclic_stop);
1177
1178 /**
1179  * dw_dma_cyclic_prep - prepare the cyclic DMA transfer
1180  * @chan: the DMA channel to prepare
1181  * @buf_addr: physical DMA address where the buffer starts
1182  * @buf_len: total number of bytes for the entire buffer
1183  * @period_len: number of bytes for each period
1184  * @direction: transfer direction, to or from device
1185  *
1186  * Must be called before trying to start the transfer. Returns a valid struct
1187  * dw_cyclic_desc if successful or an ERR_PTR(-errno) if not successful.
1188  */
1189 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
1190                 dma_addr_t buf_addr, size_t buf_len, size_t period_len,
1191                 enum dma_transfer_direction direction)
1192 {
1193         struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
1194         struct dma_slave_config         *sconfig = &dwc->dma_sconfig;
1195         struct dw_cyclic_desc           *cdesc;
1196         struct dw_cyclic_desc           *retval = NULL;
1197         struct dw_desc                  *desc;
1198         struct dw_desc                  *last = NULL;
1199         unsigned long                   was_cyclic;
1200         unsigned int                    reg_width;
1201         unsigned int                    periods;
1202         unsigned int                    i;
1203         unsigned long                   flags;
1204
1205         spin_lock_irqsave(&dwc->lock, flags);
1206         if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
1207                 spin_unlock_irqrestore(&dwc->lock, flags);
1208                 dev_dbg(chan2dev(&dwc->chan),
1209                                 "queue and/or active list are not empty\n");
1210                 return ERR_PTR(-EBUSY);
1211         }
1212
1213         was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1214         spin_unlock_irqrestore(&dwc->lock, flags);
1215         if (was_cyclic) {
1216                 dev_dbg(chan2dev(&dwc->chan),
1217                                 "channel already prepared for cyclic DMA\n");
1218                 return ERR_PTR(-EBUSY);
1219         }
1220
1221         retval = ERR_PTR(-EINVAL);
1222
1223         if (direction == DMA_MEM_TO_DEV)
1224                 reg_width = __ffs(sconfig->dst_addr_width);
1225         else
1226                 reg_width = __ffs(sconfig->src_addr_width);
1227
1228         periods = buf_len / period_len;
1229
1230         /* Check for too big/unaligned periods and unaligned DMA buffer. */
1231         if (period_len > (DWC_MAX_COUNT << reg_width))
1232                 goto out_err;
1233         if (unlikely(period_len & ((1 << reg_width) - 1)))
1234                 goto out_err;
1235         if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1236                 goto out_err;
1237         if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
1238                 goto out_err;
1239
1240         retval = ERR_PTR(-ENOMEM);
1241
1242         if (periods > NR_DESCS_PER_CHANNEL)
1243                 goto out_err;
1244
1245         cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL);
1246         if (!cdesc)
1247                 goto out_err;
1248
1249         cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL);
1250         if (!cdesc->desc)
1251                 goto out_err_alloc;
1252
1253         for (i = 0; i < periods; i++) {
1254                 desc = dwc_desc_get(dwc);
1255                 if (!desc)
1256                         goto out_err_desc_get;
1257
1258                 switch (direction) {
1259                 case DMA_MEM_TO_DEV:
1260                         desc->lli.dar = sconfig->dst_addr;
1261                         desc->lli.sar = buf_addr + (period_len * i);
1262                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1263                                         | DWC_CTLL_DST_WIDTH(reg_width)
1264                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1265                                         | DWC_CTLL_DST_FIX
1266                                         | DWC_CTLL_SRC_INC
1267                                         | DWC_CTLL_INT_EN);
1268
1269                         desc->lli.ctllo |= sconfig->device_fc ?
1270                                 DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
1271                                 DWC_CTLL_FC(DW_DMA_FC_D_M2P);
1272
1273                         break;
1274                 case DMA_DEV_TO_MEM:
1275                         desc->lli.dar = buf_addr + (period_len * i);
1276                         desc->lli.sar = sconfig->src_addr;
1277                         desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan)
1278                                         | DWC_CTLL_SRC_WIDTH(reg_width)
1279                                         | DWC_CTLL_DST_WIDTH(reg_width)
1280                                         | DWC_CTLL_DST_INC
1281                                         | DWC_CTLL_SRC_FIX
1282                                         | DWC_CTLL_INT_EN);
1283
1284                         desc->lli.ctllo |= sconfig->device_fc ?
1285                                 DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
1286                                 DWC_CTLL_FC(DW_DMA_FC_D_P2M);
1287
1288                         break;
1289                 default:
1290                         break;
1291                 }
1292
1293                 desc->lli.ctlhi = (period_len >> reg_width);
1294                 cdesc->desc[i] = desc;
1295
1296                 if (last) {
1297                         last->lli.llp = desc->txd.phys;
1298                         dma_sync_single_for_device(chan2parent(chan),
1299                                         last->txd.phys, sizeof(last->lli),
1300                                         DMA_TO_DEVICE);
1301                 }
1302
1303                 last = desc;
1304         }
1305
1306         /* let's make a cyclic list */
1307         last->lli.llp = cdesc->desc[0]->txd.phys;
1308         dma_sync_single_for_device(chan2parent(chan), last->txd.phys,
1309                         sizeof(last->lli), DMA_TO_DEVICE);
1310
1311         dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu "
1312                         "period %zu periods %d\n", buf_addr, buf_len,
1313                         period_len, periods);
1314
1315         cdesc->periods = periods;
1316         dwc->cdesc = cdesc;
1317
1318         return cdesc;
1319
1320 out_err_desc_get:
1321         while (i--)
1322                 dwc_desc_put(dwc, cdesc->desc[i]);
1323 out_err_alloc:
1324         kfree(cdesc);
1325 out_err:
1326         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1327         return (struct dw_cyclic_desc *)retval;
1328 }
1329 EXPORT_SYMBOL(dw_dma_cyclic_prep);
1330
1331 /**
1332  * dw_dma_cyclic_free - free a prepared cyclic DMA transfer
1333  * @chan: the DMA channel to free
1334  */
1335 void dw_dma_cyclic_free(struct dma_chan *chan)
1336 {
1337         struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
1338         struct dw_dma           *dw = to_dw_dma(dwc->chan.device);
1339         struct dw_cyclic_desc   *cdesc = dwc->cdesc;
1340         int                     i;
1341         unsigned long           flags;
1342
1343         dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
1344
1345         if (!cdesc)
1346                 return;
1347
1348         spin_lock_irqsave(&dwc->lock, flags);
1349
1350         channel_clear_bit(dw, CH_EN, dwc->mask);
1351         while (dma_readl(dw, CH_EN) & dwc->mask)
1352                 cpu_relax();
1353
1354         dma_writel(dw, CLEAR.ERROR, dwc->mask);
1355         dma_writel(dw, CLEAR.XFER, dwc->mask);
1356
1357         spin_unlock_irqrestore(&dwc->lock, flags);
1358
1359         for (i = 0; i < cdesc->periods; i++)
1360                 dwc_desc_put(dwc, cdesc->desc[i]);
1361
1362         kfree(cdesc->desc);
1363         kfree(cdesc);
1364
1365         clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
1366 }
1367 EXPORT_SYMBOL(dw_dma_cyclic_free);
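/*
 * Usage sketch (editor's example): the intended life cycle of the cyclic
 * extensions exported above, e.g. for an audio-style ring received from
 * a device. buf, buf_len, period_len, my_period_callback and my_data are
 * placeholders supplied by the client; dw_dma_cyclic_start()/stop() must
 * be called with soft interrupts disabled, as documented above:
 *
 *	struct dw_cyclic_desc *cdesc;
 *
 *	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
 *				   DMA_DEV_TO_MEM);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	cdesc->period_callback = my_period_callback;
 *	cdesc->period_callback_param = my_data;
 *	dw_dma_cyclic_start(chan);
 *	...
 *	dw_dma_cyclic_stop(chan);
 *	dw_dma_cyclic_free(chan);
 */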
1368
1369 /*----------------------------------------------------------------------*/
1370
1371 static void dw_dma_off(struct dw_dma *dw)
1372 {
1373         int i;
1374
1375         dma_writel(dw, CFG, 0);
1376
1377         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1378         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1379         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1380         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1381
1382         while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
1383                 cpu_relax();
1384
1385         for (i = 0; i < dw->dma.chancnt; i++)
1386                 dw->chan[i].initialized = false;
1387 }
1388
1389 static int __init dw_probe(struct platform_device *pdev)
1390 {
1391         struct dw_dma_platform_data *pdata;
1392         struct resource         *io;
1393         struct dw_dma           *dw;
1394         size_t                  size;
1395         int                     irq;
1396         int                     err;
1397         int                     i;
1398
1399         pdata = dev_get_platdata(&pdev->dev);
1400         if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS)
1401                 return -EINVAL;
1402
1403         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1404         if (!io)
1405                 return -EINVAL;
1406
1407         irq = platform_get_irq(pdev, 0);
1408         if (irq < 0)
1409                 return irq;
1410
1411         size = sizeof(struct dw_dma);
1412         size += pdata->nr_channels * sizeof(struct dw_dma_chan);
1413         dw = kzalloc(size, GFP_KERNEL);
1414         if (!dw)
1415                 return -ENOMEM;
1416
1417         if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) {
1418                 err = -EBUSY;
1419                 goto err_kfree;
1420         }
1421
1422         dw->regs = ioremap(io->start, DW_REGLEN);
1423         if (!dw->regs) {
1424                 err = -ENOMEM;
1425                 goto err_release_r;
1426         }
1427
1428         dw->clk = clk_get(&pdev->dev, "hclk");
1429         if (IS_ERR(dw->clk)) {
1430                 err = PTR_ERR(dw->clk);
1431                 goto err_clk;
1432         }
1433         clk_prepare_enable(dw->clk);
1434
1435         /* Force the controller off, in case it was left enabled. */
1436         dw_dma_off(dw);
1437
1438         err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw);
1439         if (err)
1440                 goto err_irq;
1441
1442         platform_set_drvdata(pdev, dw);
1443
1444         tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);
1445
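        /*
         * One bit per channel; used below to mask and to clear interrupt
         * status for all channels with a single register write.
         */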
1446         dw->all_chan_mask = (1 << pdata->nr_channels) - 1;
1447
1448         INIT_LIST_HEAD(&dw->dma.channels);
1449         for (i = 0; i < pdata->nr_channels; i++) {
1450                 struct dw_dma_chan      *dwc = &dw->chan[i];
1451
1452                 dwc->chan.device = &dw->dma;
1453                 dma_cookie_init(&dwc->chan);
1454                 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
1455                         list_add_tail(&dwc->chan.device_node,
1456                                         &dw->dma.channels);
1457                 else
1458                         list_add(&dwc->chan.device_node, &dw->dma.channels);
1459
1460                 /* 7 is highest priority & 0 is lowest. */
1461                 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
1462                         dwc->priority = pdata->nr_channels - i - 1;
1463                 else
1464                         dwc->priority = i;
1465
1466                 dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
1467                 spin_lock_init(&dwc->lock);
1468                 dwc->mask = 1 << i;
1469
1470                 INIT_LIST_HEAD(&dwc->active_list);
1471                 INIT_LIST_HEAD(&dwc->queue);
1472                 INIT_LIST_HEAD(&dwc->free_list);
1473
1474                 channel_clear_bit(dw, CH_EN, dwc->mask);
1475         }
1476
1477         /* Clear/disable all interrupts on all channels. */
1478         dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
1479         dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
1480         dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
1481         dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);
1482
1483         channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
1484         channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
1485         channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
1486         channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
1487
1488         dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
1489         dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
1490         if (pdata->is_private)
1491                 dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
1492         dw->dma.dev = &pdev->dev;
1493         dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
1494         dw->dma.device_free_chan_resources = dwc_free_chan_resources;
1495
1496         dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
1497
1498         dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
1499         dw->dma.device_control = dwc_control;
1500
1501         dw->dma.device_tx_status = dwc_tx_status;
1502         dw->dma.device_issue_pending = dwc_issue_pending;
1503
1504         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1505
1506         dev_info(&pdev->dev, "DesignWare DMA Controller, %d channels\n",
1507                         pdata->nr_channels);
1508
1509         dma_async_device_register(&dw->dma);
1510
1511         return 0;
1512
1513 err_irq:
1514         clk_disable_unprepare(dw->clk);
1515         clk_put(dw->clk);
1516 err_clk:
1517         iounmap(dw->regs);
1518         dw->regs = NULL;
1519 err_release_r:
1520         release_mem_region(io->start, DW_REGLEN);
1521 err_kfree:
1522         kfree(dw);
1523         return err;
1524 }
1525
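/*
 * Illustrative only: a rough sketch of the platform data dw_probe() expects
 * from board code, using the struct dw_dma_platform_data fields referenced
 * in this file (nr_channels, is_private, chan_allocation_order,
 * chan_priority).  The values and the "dw_dmac0" names are hypothetical, and
 * the device additionally needs its MMIO and IRQ resources.
 *
 *	static struct dw_dma_platform_data dw_dmac0_data = {
 *		.nr_channels		= 8,
 *		.is_private		= false,
 *		.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
 *		.chan_priority		= CHAN_PRIORITY_ASCENDING,
 *	};
 *
 *	static struct platform_device dw_dmac0_device = {
 *		.name	= "dw_dmac",
 *		.id	= 0,
 *		.dev	= {
 *			.platform_data	= &dw_dmac0_data,
 *		},
 *	};
 */

/*
 * Undo everything dw_probe() set up, in roughly reverse order: quiesce the
 * hardware, unregister from the dmaengine core, release the IRQ and kill the
 * tasklet, detach the channels, and give back the clock, register mapping
 * and memory region.
 */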
1526 static int __exit dw_remove(struct platform_device *pdev)
1527 {
1528         struct dw_dma           *dw = platform_get_drvdata(pdev);
1529         struct dw_dma_chan      *dwc, *_dwc;
1530         struct resource         *io;
1531
1532         dw_dma_off(dw);
1533         dma_async_device_unregister(&dw->dma);
1534
1535         free_irq(platform_get_irq(pdev, 0), dw);
1536         tasklet_kill(&dw->tasklet);
1537
1538         list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
1539                         chan.device_node) {
1540                 list_del(&dwc->chan.device_node);
1541                 channel_clear_bit(dw, CH_EN, dwc->mask);
1542         }
1543
1544         clk_disable_unprepare(dw->clk);
1545         clk_put(dw->clk);
1546
1547         iounmap(dw->regs);
1548         dw->regs = NULL;
1549
1550         io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1551         release_mem_region(io->start, DW_REGLEN);
1552
1553         kfree(dw);
1554
1555         return 0;
1556 }
1557
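/*
 * On system shutdown, quiesce the controller and gate its clock so no
 * transfer is left running while the machine goes down.
 */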
1558 static void dw_shutdown(struct platform_device *pdev)
1559 {
1560         struct dw_dma   *dw = platform_get_drvdata(pdev);
1561
1562         dw_dma_off(dw);
1563         clk_disable_unprepare(dw->clk);
1564 }
1565
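/*
 * Power management callbacks, run at the noirq stage: suspend turns the
 * controller off and gates its clock, resume ungates the clock and sets the
 * global DMA enable bit again.  dw_dma_off() has already marked every
 * channel as uninitialized, so per-channel setup is redone the next time a
 * channel is used.
 */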
1566 static int dw_suspend_noirq(struct device *dev)
1567 {
1568         struct platform_device *pdev = to_platform_device(dev);
1569         struct dw_dma   *dw = platform_get_drvdata(pdev);
1570
1571         dw_dma_off(dw);
1572         clk_disable_unprepare(dw->clk);
1573
1574         return 0;
1575 }
1576
1577 static int dw_resume_noirq(struct device *dev)
1578 {
1579         struct platform_device *pdev = to_platform_device(dev);
1580         struct dw_dma   *dw = platform_get_drvdata(pdev);
1581
1582         clk_prepare_enable(dw->clk);
1583         dma_writel(dw, CFG, DW_CFG_DMA_EN);
1584         return 0;
1585 }
1586
1587 static const struct dev_pm_ops dw_dev_pm_ops = {
1588         .suspend_noirq = dw_suspend_noirq,
1589         .resume_noirq = dw_resume_noirq,
1590         .freeze_noirq = dw_suspend_noirq,
1591         .thaw_noirq = dw_resume_noirq,
1592         .restore_noirq = dw_resume_noirq,
1593         .poweroff_noirq = dw_suspend_noirq,
1594 };
1595
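/*
 * Device tree match table (e.g. the DMA controller on SPEAr1340).  When
 * CONFIG_OF is disabled, of_match_ptr() below evaluates to NULL, so the
 * table is compiled out entirely.
 */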
1596 #ifdef CONFIG_OF
1597 static const struct of_device_id dw_dma_id_table[] = {
1598         { .compatible = "snps,dma-spear1340" },
1599         {}
1600 };
1601 MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1602 #endif
1603
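/*
 * Note that .probe is deliberately absent: the driver is registered with
 * platform_driver_probe() in dw_init() below, which allows dw_probe() to be
 * marked __init and discarded once boot-time probing is done.
 */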
1604 static struct platform_driver dw_driver = {
1605         .remove         = __exit_p(dw_remove),
1606         .shutdown       = dw_shutdown,
1607         .driver = {
1608                 .name   = "dw_dmac",
1609                 .pm     = &dw_dev_pm_ops,
1610                 .of_match_table = of_match_ptr(dw_dma_id_table),
1611         },
1612 };
1613
1614 static int __init dw_init(void)
1615 {
1616         return platform_driver_probe(&dw_driver, dw_probe);
1617 }
1618 subsys_initcall(dw_init);
1619
1620 static void __exit dw_exit(void)
1621 {
1622         platform_driver_unregister(&dw_driver);
1623 }
1624 module_exit(dw_exit);
1625
1626 MODULE_LICENSE("GPL v2");
1627 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1628 MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1629 MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");