/*
 * DMA handling for the Rockchip SPI controller, derived from the
 * DesignWare SPI core support for the Intel MID platform.
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/spi-rockchip.h>

#include "spi-rockchip-core.h"

#ifdef CONFIG_SPI_ROCKCHIP_DMA
#define DMA_BUFFER_SIZE (PAGE_SIZE << 4)

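/*
 * Per-direction DMA channel bookkeeping.  One spi_dma instance, holding a
 * TX and an RX slave, hangs off dws->dma_priv; each slave is also stashed
 * in the matching dma_chan->private when the channel is requested.
 */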
struct spi_dma_slave {
        struct dma_chan *ch;
        enum dma_transfer_direction direction;
        unsigned int dmach;
};

struct spi_dma {
        struct spi_dma_slave    dmas_tx;
        struct spi_dma_slave    dmas_rx;
};

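/* Dump a transfer buffer as hex bytes through the driver's debug macro. */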
static void printk_transfer_data(struct dw_spi *dws, char *buf, int len)
{
        int i;

        for (i = 0; i < len; i++)
                DBG_SPI("0x%02x,", *buf++);

        DBG_SPI("\n");
}

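/*
 * Request the DMA channels bound to the controller's device node.  This
 * assumes the device tree names the channels "tx" and "rx" (the strings
 * passed to dma_request_slave_channel() below); on failure dws->dma_inited
 * is left unset so the core can avoid the DMA path.
 */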
static int mid_spi_dma_init(struct dw_spi *dws)
{
        struct spi_dma *dw_dma = dws->dma_priv;
        struct spi_dma_slave *rxs, *txs;

        DBG_SPI("%s:start\n", __func__);

        /* 1. Init rx channel */
        dws->rxchan = dma_request_slave_channel(dws->parent_dev, "rx");
        if (!dws->rxchan) {
                dev_err(dws->parent_dev, "Failed to get RX DMA channel\n");
                goto err_exit;
        }

        DBG_SPI("%s:rx_chan_id=%d\n", __func__, dws->rxchan->chan_id);

        rxs = &dw_dma->dmas_rx;
        dws->rxchan->private = rxs;

        /* 2. Init tx channel */
        dws->txchan = dma_request_slave_channel(dws->parent_dev, "tx");
        if (!dws->txchan) {
                dev_err(dws->parent_dev, "Failed to get TX DMA channel\n");
                goto free_rxchan;
        }
        txs = &dw_dma->dmas_tx;
        dws->txchan->private = txs;

        DBG_SPI("%s:tx_chan_id=%d\n", __func__, dws->txchan->chan_id);

        dws->dma_inited = 1;

        DBG_SPI("%s:line=%d\n", __func__, __LINE__);
        return 0;

free_rxchan:
        dma_release_channel(dws->rxchan);
err_exit:
        return -EBUSY;
}

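/* Release both channels; called when the controller is torn down. */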
static void mid_spi_dma_exit(struct dw_spi *dws)
{
        DBG_SPI("%s:start\n", __func__);
        dma_release_channel(dws->txchan);
        dma_release_channel(dws->rxchan);
}

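/*
 * Completion callbacks.  TX and RX run as independent DMA transfers, so
 * each callback clears its own BUSY flag under dws->lock, and only the
 * side that finishes last completes the transfer and notifies the core.
 */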
static void dw_spi_dma_rxcb(void *arg)
{
        struct dw_spi *dws = arg;
        unsigned long flags;
        struct dma_tx_state state;
        int dma_status;

        /* Hand the buffer back to the CPU before reading the received data. */
        dma_sync_single_for_cpu(dws->rxchan->device->dev, dws->rx_dma,
                                dws->len, DMA_FROM_DEVICE);

        dma_status = dmaengine_tx_status(dws->rxchan, dws->rx_cookie, &state);

        DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);

        spin_lock_irqsave(&dws->lock, flags);
        if (dma_status == DMA_SUCCESS)
                dws->state &= ~RXBUSY;
        else
                dev_err(&dws->master->dev, "error:rx dma_status=%x\n", dma_status);

        /* Copy the received data from the DMA bounce buffer to the transfer buffer. */
        if (dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL)) {
                memcpy(dws->cur_transfer->rx_buf, dws->rx_buffer,
                       dws->cur_transfer->len);

                DBG_SPI("dma rx:");
                printk_transfer_data(dws, dws->cur_transfer->rx_buf,
                                     dws->cur_transfer->len);
        }

        spin_unlock_irqrestore(&dws->lock, flags);

        /* If the TX side is already done, the whole transfer is complete. */
        if (!(dws->state & TXBUSY)) {
                complete(&dws->xfer_completion);
                DBG_SPI("%s:complete\n", __func__);
                /* The DMA interrupt cannot be lost, so signal completion here. */
                dw_spi_xfer_done(dws);
        }
}

static void dw_spi_dma_txcb(void *arg)
{
        struct dw_spi *dws = arg;
        unsigned long flags;
        struct dma_tx_state state;
        int dma_status;

        /* Reclaim the buffer for the CPU now that the transmit is done. */
        dma_sync_single_for_cpu(dws->txchan->device->dev, dws->tx_dma,
                                dws->len, DMA_TO_DEVICE);

        dma_status = dmaengine_tx_status(dws->txchan, dws->tx_cookie, &state);

        DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);
        DBG_SPI("dma tx:");
        printk_transfer_data(dws, (char *)dws->cur_transfer->tx_buf,
                             dws->cur_transfer->len);

        spin_lock_irqsave(&dws->lock, flags);

        if (dma_status == DMA_SUCCESS)
                dws->state &= ~TXBUSY;
        else
                dev_err(&dws->master->dev, "error:tx dma_status=%x\n", dma_status);

        spin_unlock_irqrestore(&dws->lock, flags);

        /* If the RX side is already done, the whole transfer is complete. */
        if (!(dws->state & RXBUSY)) {
                complete(&dws->xfer_completion);
                DBG_SPI("%s:complete\n", __func__);
                /* The DMA interrupt cannot be lost, so signal completion here. */
                dw_spi_xfer_done(dws);
        }
}

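/*
 * Queue one transfer on the TX and/or RX channel.  The payload moves
 * through driver-owned bounce buffers when the transfer carries no
 * per-transfer DMA addresses, each channel is configured for the
 * transfer's word width, and RX is always submitted before TX.
 */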
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
        struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
        struct dma_chan *txchan = NULL, *rxchan = NULL;
        struct dma_slave_config txconf, rxconf;
        int ret = 0;
        enum dma_slave_buswidth width;

        DBG_SPI("%s:cs_change=%d\n", __func__, cs_change);

        /*
         * Fall back to the driver's bounce buffers when the transfer was
         * DMA-mapped but cur_transfer->tx_dma or cur_transfer->rx_dma is null.
         */
        if (dws->cur_transfer->tx_buf && dws->dma_mapped &&
            !dws->cur_transfer->tx_dma) {
                memcpy(dws->tx_buffer, dws->cur_transfer->tx_buf,
                       dws->cur_transfer->len);
                dws->tx_dma = dws->tx_dma_init;
        }

        if (dws->cur_transfer->rx_buf && dws->dma_mapped &&
            !dws->cur_transfer->rx_dma)
                dws->rx_dma = dws->rx_dma_init;

        if (dws->tx)
                dws->state |= TXBUSY;
        if (dws->rx)
                dws->state |= RXBUSY;

        /* Map the transfer's word size onto the DMA slave bus width. */
        switch (dws->n_bytes) {
        case 1:
                width = DMA_SLAVE_BUSWIDTH_1_BYTE;
                break;
        case 2:
                width = DMA_SLAVE_BUSWIDTH_2_BYTES;
                break;
        default:
                width = DMA_SLAVE_BUSWIDTH_4_BYTES;
                break;
        }

        dws->dma_chan_done = 0;

        if (dws->tx)
                txchan = dws->txchan;

        if (dws->rx)
                rxchan = dws->rxchan;

        if (dws->tx) {
                /* 2. Prepare the TX dma transfer */
                txconf.direction = DMA_MEM_TO_DEV;
                txconf.dst_addr = dws->tx_dma_addr;
                txconf.dst_maxburst = dws->dma_width;
                txconf.dst_addr_width = width;

                ret = dmaengine_slave_config(txchan, &txconf);
                if (ret) {
                        dev_warn(dws->parent_dev, "TX DMA slave config failed\n");
                        return ret;
                }

                memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
                dws->tx_sgl.dma_address = dws->tx_dma;
                dws->tx_sgl.length = dws->len;

                txdesc = dmaengine_prep_slave_sg(txchan,
                                        &dws->tx_sgl,
                                        1,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT);
                if (!txdesc) {
                        dev_warn(dws->parent_dev, "Failed to prepare TX descriptor\n");
                        return -ENOMEM;
                }

                txdesc->callback = dw_spi_dma_txcb;
                txdesc->callback_param = dws;

                DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%d,burst=%d,width=%d\n",
                        __func__, (int *)dws->tx_dma_addr, (int *)dws->tx_dma,
                        dws->len, dws->dma_width, width);
        }

        if (dws->rx) {
                /* 3. Prepare the RX dma transfer */
                rxconf.direction = DMA_DEV_TO_MEM;
                rxconf.src_addr = dws->rx_dma_addr;
                rxconf.src_maxburst = dws->dma_width;
                rxconf.src_addr_width = width;

                ret = dmaengine_slave_config(rxchan, &rxconf);
                if (ret) {
                        dev_warn(dws->parent_dev, "RX DMA slave config failed\n");
                        return ret;
                }

                memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
                dws->rx_sgl.dma_address = dws->rx_dma;
                dws->rx_sgl.length = dws->len;

                rxdesc = dmaengine_prep_slave_sg(rxchan,
                                        &dws->rx_sgl,
                                        1,
                                        DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT);
                if (!rxdesc) {
                        dev_warn(dws->parent_dev, "Failed to prepare RX descriptor\n");
                        return -ENOMEM;
                }

                rxdesc->callback = dw_spi_dma_rxcb;
                rxdesc->callback_param = dws;

                DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%d,burst=%d,width=%d\n",
                        __func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma,
                        dws->len, dws->dma_width, width);
        }

        /*
         * RX must be started before TX: SPI is full duplex, so once TX
         * starts clocking data in, the RX FIFO has to be drained or it
         * will overrun.
         */
        if (dws->rx) {
                dws->rx_cookie = dmaengine_submit(rxdesc);
                dma_sync_single_for_device(rxchan->device->dev, dws->rx_dma,
                                           dws->len, DMA_FROM_DEVICE);
                dma_async_issue_pending(rxchan);

                DBG_SPI("%s:rx end\n", __func__);
        }

        if (dws->tx) {
                dws->tx_cookie = dmaengine_submit(txdesc);
                dma_sync_single_for_device(txchan->device->dev, dws->tx_dma,
                                           dws->len, DMA_TO_DEVICE);
                dma_async_issue_pending(txchan);

                DBG_SPI("%s:tx end\n", __func__);
        }

        return 0;
}

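/* DMA callbacks handed to the Rockchip SPI core. */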
static struct dw_spi_dma_ops spi_dma_ops = {
        .dma_init       = mid_spi_dma_init,
        .dma_exit       = mid_spi_dma_exit,
        .dma_transfer   = mid_spi_dma_transfer,
};

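/*
 * Allocate the per-controller DMA state and the coherent bounce buffers
 * used when a transfer arrives without pre-mapped DMA addresses.
 */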
int dw_spi_dma_init(struct dw_spi *dws)
{
        DBG_SPI("%s:start\n", __func__);
        dws->dma_priv = kzalloc(sizeof(struct spi_dma), GFP_KERNEL);
        if (!dws->dma_priv)
                return -ENOMEM;
        dws->dma_ops = &spi_dma_ops;

        dws->tx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
                                            &dws->tx_dma_init,
                                            GFP_KERNEL | GFP_DMA);
        if (!dws->tx_buffer) {
                dev_err(dws->parent_dev, "failed to alloc dma tx buffer\n");
                goto err_free_priv;
        }

        dws->rx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
                                            &dws->rx_dma_init,
                                            GFP_KERNEL | GFP_DMA);
        if (!dws->rx_buffer) {
                dev_err(dws->parent_dev, "failed to alloc dma rx buffer\n");
                goto err_free_tx;
        }

        memset(dws->tx_buffer, 0, DMA_BUFFER_SIZE);
        memset(dws->rx_buffer, 0, DMA_BUFFER_SIZE);

        dws->state = 0;

        init_completion(&dws->xfer_completion);

        return 0;

err_free_tx:
        dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE, dws->tx_buffer,
                          dws->tx_dma_init);
err_free_priv:
        kfree(dws->dma_priv);
        return -ENOMEM;
}
#endif