dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer
author Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Wed, 10 Feb 2016 13:59:42 +0000 (15:59 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 3 Mar 2016 23:07:24 +0000 (15:07 -0800)
commit ee1cdcdae59563535485a5f56ee72c894ab7d7ad upstream.

Commit 2895b2cad6e7 ("dmaengine: dw: fix cyclic transfer callbacks")
re-enabled BLOCK interrupts to make cyclic transfers work. However, this
change is a regression for non-cyclic transfers: under a stress test the
interrupt count grew enormously (approximately one interrupt per 4-5 bytes
in the UART loopback test).

Taking the above into consideration, enable BLOCK interrupts if and only if
the channel is programmed to perform a cyclic transfer.
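
To illustrate the intended policy, here is a minimal, standalone user-space
sketch of the masking behaviour this patch implements. The chan_model type
and the DW_IRQ_* constants below are hypothetical stand-ins for the driver's
per-channel mask state, not the driver's own API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the per-channel interrupt sources. */
	#define DW_IRQ_XFER   (1u << 0)
	#define DW_IRQ_BLOCK  (1u << 1)
	#define DW_IRQ_ERROR  (1u << 2)

	struct chan_model {
		uint32_t unmasked;	/* interrupt sources currently enabled */
		bool cyclic;
	};

	/* Mirrors dwc_initialize() after the patch: BLOCK stays masked. */
	static void chan_init(struct chan_model *c)
	{
		c->unmasked = DW_IRQ_XFER | DW_IRQ_ERROR;
		c->cyclic = false;
	}

	/* Mirrors dw_dma_cyclic_start(): BLOCK is unmasked only for cyclic use. */
	static void chan_cyclic_start(struct chan_model *c)
	{
		c->cyclic = true;
		c->unmasked |= DW_IRQ_BLOCK;
	}

	int main(void)
	{
		struct chan_model mem, audio;

		chan_init(&mem);		/* ordinary (non-cyclic) transfer */
		chan_init(&audio);
		chan_cyclic_start(&audio);	/* cyclic transfer, e.g. an audio ring */

		printf("non-cyclic BLOCK enabled: %d\n", !!(mem.unmasked & DW_IRQ_BLOCK));
		printf("cyclic     BLOCK enabled: %d\n", !!(audio.unmasked & DW_IRQ_BLOCK));
		return 0;
	}

In the driver itself the same effect is achieved with channel_set_bit(dw,
MASK.BLOCK, dwc->mask) called from dw_dma_cyclic_start() (and re-armed in
dwc_handle_cyclic()), while dwc_initialize() and dw_dma_tasklet() no longer
touch MASK.BLOCK, as the diff below shows.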

Fixes: 2895b2cad6e7 ("dmaengine: dw: fix cyclic transfer callbacks")
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Mans Rullgard <mans@mansr.com>
Tested-by: Mans Rullgard <mans@mansr.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/dma/dw/core.c

index b926627224046a807d3c64f2825577e5adcf81d2..4f099ea29f83e991349892d91f15c7d0f07d8e38 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
 
        /* Enable interrupts */
        channel_set_bit(dw, MASK.XFER, dwc->mask);
-       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
        channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
        dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
                spin_unlock_irqrestore(&dwc->lock, flags);
        }
+
+       /* Re-enable interrupts */
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 }
 
 /* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
                        dwc_scan_descriptors(dw, dwc);
        }
 
-       /*
-        * Re-enable interrupts.
-        */
+       /* Re-enable interrupts */
        channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
-       channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
        channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
 }
 
@@ -1256,6 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 int dw_dma_cyclic_start(struct dma_chan *chan)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
+       struct dw_dma           *dw = to_dw_dma(chan->device);
        unsigned long           flags;
 
        if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1264,7 +1264,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
        }
 
        spin_lock_irqsave(&dwc->lock, flags);
+
+       /* Enable interrupts to perform cyclic transfer */
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
        dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        return 0;