2 * Special handling for DW core on Intel MID platform
4 * Copyright (c) 2009, Intel Corporation.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
20 #include <linux/interrupt.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/module.h>
24 #include <linux/workqueue.h>
25 #include <linux/interrupt.h>
26 #include <linux/delay.h>
27 #include <linux/clk.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/dmaengine.h>
30 #include <linux/platform_device.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/spi/spi.h>
33 #include <linux/gpio.h>
35 #include <linux/of_gpio.h>
36 #include <linux/platform_data/spi-rockchip.h>
39 #include "spi-rockchip-core.h"
41 #ifdef CONFIG_SPI_ROCKCHIP_DMA
/* Size of the preallocated bounce buffers used when a transfer arrives
 * without a pre-mapped DMA address: 16 pages (PAGE_SIZE << 4). */
42 #define DMA_BUFFER_SIZE (PAGE_SIZE<<4)
/* Per-direction dmaengine slave parameters; a pointer to one of these is
 * attached to the requested channel via chan->private. */
45 struct spi_dma_slave {
47 enum dma_transfer_direction direction;
/* TX/RX slave descriptors for one controller instance.
 * NOTE(review): the opening line of the enclosing struct is not visible in
 * this chunk — presumably `struct spi_dma` (the type kzalloc'd in
 * dw_spi_dma_init below and read through dws->dma_priv); confirm. */
53 struct spi_dma_slave dmas_tx;
54 struct spi_dma_slave dmas_rx;
/* Debug helper: dump @len bytes of @buf as hex through DBG_SPI.
 * @dws is unused in the visible statement; presumably kept for context.
 * NOTE(review): the surrounding loop/braces are not visible in this view. */
57 static void printk_transfer_data(struct dw_spi *dws, char *buf, int len)
61 DBG_SPI("0x%02x,",*buf++);
/*
 * mid_spi_dma_init - request the "rx" and "tx" dmaengine slave channels
 * for this controller and hook per-direction slave data onto them via
 * chan->private.  On the TX failure path the already-acquired RX channel
 * is released again.
 *
 * NOTE(review): the NULL checks after each dma_request_slave_channel()
 * call and the return statements fall outside this view — the dev_err()
 * lines imply they exist; confirm against the full file.
 */
67 static int mid_spi_dma_init(struct dw_spi *dws)
69 struct spi_dma *dw_dma = dws->dma_priv;
70 struct spi_dma_slave *rxs, *txs;
72 DBG_SPI("%s:start\n",__func__);
74 /* 1. Acquire the RX slave channel (devicetree name "rx") */
75 dws->rxchan = dma_request_slave_channel(dws->parent_dev, "rx");
78 dev_err(dws->parent_dev, "Failed to get RX DMA channel\n");
82 DBG_SPI("%s:rx_chan_id=%d\n",__func__,dws->rxchan->chan_id);
/* Attach RX slave parameters for the dmaengine driver to read. */
84 rxs = &dw_dma->dmas_rx;
85 dws->rxchan->private = rxs;
87 /* 2. Acquire the TX slave channel (devicetree name "tx") */
88 dws->txchan = dma_request_slave_channel(dws->parent_dev, "tx");
91 dev_err(dws->parent_dev, "Failed to get TX DMA channel\n");
94 txs = &dw_dma->dmas_tx;
95 dws->txchan->private = txs;
97 DBG_SPI("%s:tx_chan_id=%d\n",__func__,dws->txchan->chan_id);
101 DBG_SPI("%s:line=%d\n",__func__,__LINE__);
/* TX acquisition failed: undo the RX request before bailing out. */
105 dma_release_channel(dws->rxchan);
/* mid_spi_dma_exit - release both slave channels acquired by
 * mid_spi_dma_init(). */
111 static void mid_spi_dma_exit(struct dw_spi *dws)
113 DBG_SPI("%s:start\n",__func__);
114 dma_release_channel(dws->txchan);
115 dma_release_channel(dws->rxchan);
/*
 * dw_spi_dma_rxcb - dmaengine completion callback for the RX channel.
 * Clears RXBUSY under dws->lock, copies the bounce buffer back into the
 * transfer's rx_buf, and — once TX is also done — signals transfer
 * completion.
 *
 * NOTE(review): dma_sync_single_for_device() after an RX completion looks
 * wrong; for DMA_FROM_DEVICE data the CPU is about to read, the usual call
 * is dma_sync_single_for_cpu().  Confirm against the mapping ownership.
 * NOTE(review): DMA_SUCCESS is the pre-v3.15 name of DMA_COMPLETE; this
 * code targets an older kernel API.
 */
119 static void dw_spi_dma_rxcb(void *arg)
121 struct dw_spi *dws = arg;
123 struct dma_tx_state state;
126 dma_sync_single_for_device(dws->rxchan->device->dev, dws->rx_dma,
127 dws->len, DMA_FROM_DEVICE);
129 dma_status = dmaengine_tx_status(dws->rxchan, dws->rx_cookie, &state);
131 DBG_SPI("%s:dma_status=0x%x\n", __FUNCTION__, dma_status);
/* State flags are shared with the TX callback; guard with dws->lock. */
133 spin_lock_irqsave(&dws->lock, flags);
134 if (dma_status == DMA_SUCCESS)
135 dws->state &= ~RXBUSY;
137 dev_err(&dws->master->dev, "error:rx dma_status=%x\n", dma_status);
/* Copy received data from the DMA bounce buffer into the transfer buffer. */
140 if(dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL))
142 memcpy(dws->cur_transfer->rx_buf, dws->rx_buffer, dws->cur_transfer->len);
145 printk_transfer_data(dws, dws->cur_transfer->rx_buf, dws->cur_transfer->len);
148 spin_unlock_irqrestore(&dws->lock, flags);
/* If the TX side has already finished, the whole transfer is done. */
151 if (!(dws->state & TXBUSY))
/* DMA must not lose the completion notification. */
154 dw_spi_xfer_done(dws);
155 complete(&dws->xfer_completion);
156 DBG_SPI("%s:complete\n", __FUNCTION__);
/*
 * dw_spi_dma_txcb - dmaengine completion callback for the TX channel.
 * Clears TXBUSY under dws->lock and, once RX is also done, signals
 * transfer completion.  Mirror image of dw_spi_dma_rxcb() above.
 *
 * NOTE(review): DMA_SUCCESS is the pre-v3.15 name of DMA_COMPLETE;
 * this code targets an older kernel API.
 */
161 static void dw_spi_dma_txcb(void *arg)
163 struct dw_spi *dws = arg;
165 struct dma_tx_state state;
168 dma_sync_single_for_device(dws->txchan->device->dev, dws->tx_dma,
169 dws->len, DMA_TO_DEVICE);
171 dma_status = dmaengine_tx_status(dws->txchan, dws->tx_cookie, &state);
173 DBG_SPI("%s:dma_status=0x%x\n", __FUNCTION__, dma_status);
/* Debug-dump the data that was just sent. */
175 printk_transfer_data(dws, (char *)dws->cur_transfer->tx_buf, dws->cur_transfer->len);
/* State flags are shared with the RX callback; guard with dws->lock. */
177 spin_lock_irqsave(&dws->lock, flags);
179 if (dma_status == DMA_SUCCESS)
180 dws->state &= ~TXBUSY;
182 dev_err(&dws->master->dev, "error:tx dma_status=%x\n", dma_status);
184 spin_unlock_irqrestore(&dws->lock, flags);
/* If the RX side has already finished, the whole transfer is done. */
187 if (!(dws->state & RXBUSY))
/* DMA must not lose the completion notification. */
190 dw_spi_xfer_done(dws);
191 complete(&dws->xfer_completion);
192 DBG_SPI("%s:complete\n", __FUNCTION__);
/*
 * mid_spi_dma_transfer - configure both slave channels, build one
 * scatterlist entry per direction, and kick off the RX then TX DMA for
 * the current transfer.  RX is issued before TX because the SPI core
 * would otherwise overrun its RX FIFO.
 *
 * @cs_change: passed through from the core; only logged in the visible
 *             lines — presumably consumed by code outside this view.
 *
 * NOTE(review): several guard lines (error checks after
 * dmaengine_slave_config(), the NULL checks on txdesc/rxdesc, the
 * per-direction enable conditions, switch default, and the return) are
 * missing from this view; confirm against the full file.
 */
198 static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
200 struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
201 struct dma_chan *txchan, *rxchan;
202 struct dma_slave_config txconf, rxconf;
205 enum dma_slave_buswidth width;
207 DBG_SPI("%s:cs_change=%d\n",__func__,cs_change);
/* Fall back to the preallocated bounce buffers whenever the transfer was
 * not handed to us with an already-mapped tx_dma/rx_dma address. */
210 if((dws->cur_transfer->tx_buf) && dws->dma_mapped && (!dws->cur_transfer->tx_dma))
213 memcpy(dws->tx_buffer, dws->cur_transfer->tx_buf, dws->cur_transfer->len);
214 dws->tx_dma = dws->tx_dma_init;
217 if((dws->cur_transfer->rx_buf) && dws->dma_mapped && (!dws->cur_transfer->rx_dma))
220 dws->rx_dma = dws->rx_dma_init;
/* Mark the directions in flight; the callbacks clear these flags. */
225 dws->state |= TXBUSY;
227 dws->state |= RXBUSY;
/* Map the per-word byte count onto the dmaengine bus width. */
230 switch (dws->n_bytes) {
232 width = DMA_SLAVE_BUSWIDTH_1_BYTE;
235 width = DMA_SLAVE_BUSWIDTH_2_BYTES;
238 width = DMA_SLAVE_BUSWIDTH_4_BYTES;
242 dws->dma_chan_done = 0;
245 txchan = dws->txchan;
248 rxchan = dws->rxchan;
252 /* 2. Prepare the TX dma transfer */
253 txconf.direction = DMA_MEM_TO_DEV;
254 txconf.dst_addr = dws->tx_dma_addr;
/* Burst size taken from the controller's TX FIFO threshold. */
255 txconf.dst_maxburst = dws->dmatdlr;//dws->dma_width;
257 txconf.dst_addr_width = width;
260 ret = dmaengine_slave_config(txchan, &txconf);
262 dev_warn(dws->parent_dev, "TX DMA slave config failed\n");
/* Single-entry scatterlist covering the whole TX buffer. */
266 memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
267 dws->tx_sgl.dma_address = dws->tx_dma;
268 dws->tx_sgl.length = dws->len;
270 txdesc = dmaengine_prep_slave_sg(txchan,
276 txdesc->callback = dw_spi_dma_txcb;
277 txdesc->callback_param = dws;
279 DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%ld,burst=%d,width=%d\n"
280 ,__func__,(int *)dws->tx_dma_addr, (int *)dws->tx_dma, (long)dws->len,txconf.dst_maxburst, width);
285 /* 3. Prepare the RX dma transfer */
286 rxconf.direction = DMA_DEV_TO_MEM;
287 rxconf.src_addr = dws->rx_dma_addr;
/* Burst size derived from the RX FIFO threshold (register value + 1). */
288 rxconf.src_maxburst = dws->dmardlr + 1;//dws->dma_width;
290 rxconf.src_addr_width = width;
293 ret = dmaengine_slave_config(rxchan, &rxconf);
295 dev_warn(dws->parent_dev, "RX DMA slave config failed\n");
/* Single-entry scatterlist covering the whole RX buffer. */
299 memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
300 dws->rx_sgl.dma_address = dws->rx_dma;
301 dws->rx_sgl.length = dws->len;
303 rxdesc = dmaengine_prep_slave_sg(rxchan,
308 rxdesc->callback = dw_spi_dma_rxcb;
309 rxdesc->callback_param = dws;
311 DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%ld,burst=%d,width=%d\n"
312 ,__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma, (long)dws->len, rxconf.src_maxburst, width);
316 spi_enable_chip(dws, 1);
318 /* RX must be started before TX due to the SPI controller's behavior. */
321 dws->rx_cookie = dmaengine_submit(rxdesc);
322 dma_sync_single_for_device(rxchan->device->dev, dws->rx_dma,
323 dws->len, DMA_FROM_DEVICE);
324 dma_async_issue_pending(rxchan);
326 DBG_SPI("%s:rx end\n",__func__);
331 dws->tx_cookie = dmaengine_submit(txdesc);
332 dma_sync_single_for_device(txchan->device->dev, dws->tx_dma,
333 dws->len, DMA_TO_DEVICE);
334 dma_async_issue_pending(txchan);
336 DBG_SPI("%s:tx end\n",__func__);
/* dmaengine-backed operations plugged into the dw_spi core. */
342 static struct dw_spi_dma_ops spi_dma_ops = {
343 .dma_init = mid_spi_dma_init,
344 .dma_exit = mid_spi_dma_exit,
345 .dma_transfer = mid_spi_dma_transfer,
/*
 * dw_spi_dma_init - public entry point: allocate the per-controller DMA
 * state, install spi_dma_ops, and carve out coherent TX/RX bounce buffers
 * of DMA_BUFFER_SIZE each (handles stored in tx_dma_init/rx_dma_init).
 *
 * NOTE(review): the allocation-failure checks, cleanup/return paths, and
 * the end of this function continue past the last line visible here —
 * confirm the rx_buffer failure path frees tx_buffer.
 */
348 int dw_spi_dma_init(struct dw_spi *dws)
350 DBG_SPI("%s:start\n",__func__);
351 dws->dma_priv = kzalloc(sizeof(struct spi_dma), GFP_KERNEL);
354 dws->dma_ops = &spi_dma_ops;
356 dws->tx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE, &dws->tx_dma_init, GFP_KERNEL | GFP_DMA);
359 dev_err(dws->parent_dev, "fail to dma tx buffer alloc\n");
363 dws->rx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE, &dws->rx_dma_init, GFP_KERNEL | GFP_DMA);
366 dev_err(dws->parent_dev, "fail to dma rx buffer alloc\n");
370 memset(dws->tx_buffer, 0, DMA_BUFFER_SIZE);
371 memset(dws->rx_buffer, 0, DMA_BUFFER_SIZE);
/* Completion signalled by the DMA callbacks when both directions finish. */
375 init_completion(&dws->xfer_completion);