drivers/spi/spi-rockchip-dma.c
/*
 * DMA handling for the DesignWare SPI core on Rockchip SoCs
 * (derived from the Intel MID DMA support).
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/spi-rockchip.h>

#include "spi-rockchip-core.h"

#ifdef CONFIG_SPI_ROCKCHIP_DMA
#define DMA_BUFFER_SIZE	(PAGE_SIZE << 4)

struct spi_dma_slave {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

struct spi_dma {
	struct spi_dma_slave	dmas_tx;
	struct spi_dma_slave	dmas_rx;
};

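/* Dump a transfer buffer byte by byte via DBG_SPI for debugging. */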
static void printk_transfer_data(struct dw_spi *dws, char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		DBG_SPI("0x%02x,", *buf++);

	DBG_SPI("\n");
}

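/*
 * Request the controller's "rx" and "tx" DMA slave channels and attach the
 * driver's private slave data to each channel.
 */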
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct spi_dma *dw_dma = dws->dma_priv;
	struct spi_dma_slave *rxs, *txs;

	DBG_SPI("%s:start\n", __func__);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_slave_channel(dws->parent_dev, "rx");
	if (!dws->rxchan) {
		dev_err(dws->parent_dev, "Failed to get RX DMA channel\n");
		goto err_exit;
	}

	DBG_SPI("%s:rx_chan_id=%d\n", __func__, dws->rxchan->chan_id);

	rxs = &dw_dma->dmas_rx;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel */
	dws->txchan = dma_request_slave_channel(dws->parent_dev, "tx");
	if (!dws->txchan) {
		dev_err(dws->parent_dev, "Failed to get TX DMA channel\n");
		goto free_rxchan;
	}
	txs = &dw_dma->dmas_tx;
	dws->txchan->private = txs;

	DBG_SPI("%s:tx_chan_id=%d\n", __func__, dws->txchan->chan_id);

	dws->dma_inited = 1;

	DBG_SPI("%s:line=%d\n", __func__, __LINE__);
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

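/* Release both DMA channels acquired in mid_spi_dma_init(). */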
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	DBG_SPI("%s:start\n", __func__);
	dma_release_channel(dws->txchan);
	dma_release_channel(dws->rxchan);
}

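/*
 * RX DMA completion callback: copy the received data from the DMA bounce
 * buffer into the transfer's rx_buf and, once the TX side has also finished,
 * complete the transfer.
 */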
static void dw_spi_dma_rxcb(void *arg)
{
	struct dw_spi *dws = arg;
	unsigned long flags;
	struct dma_tx_state state;
	int dma_status;

	dma_sync_single_for_device(dws->rxchan->device->dev, dws->rx_dma,
				   dws->len, DMA_FROM_DEVICE);

	dma_status = dmaengine_tx_status(dws->rxchan, dws->rx_cookie, &state);

	DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);

	spin_lock_irqsave(&dws->lock, flags);
	if (dma_status == DMA_SUCCESS)
		dws->state &= ~RXBUSY;
	else
		dev_err(&dws->master->dev, "error:rx dma_status=%x\n", dma_status);

	/* Copy the data received via DMA into the transfer's rx buffer */
	if (dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL)) {
		memcpy(dws->cur_transfer->rx_buf, dws->rx_buffer, dws->cur_transfer->len);

		DBG_SPI("dma rx:");
		printk_transfer_data(dws, dws->cur_transfer->rx_buf, dws->cur_transfer->len);
	}

	spin_unlock_irqrestore(&dws->lock, flags);

	/* If the TX side is also done, signal completion */
	if (!(dws->state & TXBUSY)) {
		complete(&dws->xfer_completion);
		DBG_SPI("%s:complete\n", __func__);
		/* The DMA interrupt must not be lost; mark the transfer as done */
		dw_spi_xfer_done(dws);
	}
}

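/*
 * TX DMA completion callback: clear the TX-busy flag and, once the RX side
 * has also finished, complete the transfer.
 */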
static void dw_spi_dma_txcb(void *arg)
{
	struct dw_spi *dws = arg;
	unsigned long flags;
	struct dma_tx_state state;
	int dma_status;

	dma_sync_single_for_device(dws->txchan->device->dev, dws->tx_dma,
				   dws->len, DMA_TO_DEVICE);

	dma_status = dmaengine_tx_status(dws->txchan, dws->tx_cookie, &state);

	DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);
	DBG_SPI("dma tx:");
	printk_transfer_data(dws, (char *)dws->cur_transfer->tx_buf, dws->cur_transfer->len);

	spin_lock_irqsave(&dws->lock, flags);

	if (dma_status == DMA_SUCCESS)
		dws->state &= ~TXBUSY;
	else
		dev_err(&dws->master->dev, "error:tx dma_status=%x\n", dma_status);

	spin_unlock_irqrestore(&dws->lock, flags);

	/* If the RX side is also done, signal completion */
	if (!(dws->state & RXBUSY)) {
		complete(&dws->xfer_completion);
		DBG_SPI("%s:complete\n", __func__);

		/* The DMA interrupt must not be lost; mark the transfer as done */
		dw_spi_xfer_done(dws);
	}
}

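/*
 * Set up and start one DMA transfer.  RX and TX are configured and submitted
 * independently; the RX channel is issued first, then TX.
 */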
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
	struct dma_chan *txchan = NULL, *rxchan = NULL;
	struct dma_slave_config txconf, rxconf;
	enum dma_slave_buswidth width;
	int ret = 0;

	DBG_SPI("%s:cs_change=%d\n", __func__, cs_change);

	/*
	 * Fall back to the driver's own DMA bounce buffers when the current
	 * transfer does not provide tx_dma/rx_dma addresses.
	 */
	if (dws->cur_transfer->tx_buf && dws->dma_mapped && !dws->cur_transfer->tx_dma) {
		memcpy(dws->tx_buffer, dws->cur_transfer->tx_buf, dws->cur_transfer->len);
		dws->tx_dma = dws->tx_dma_init;
	}

	if (dws->cur_transfer->rx_buf && dws->dma_mapped && !dws->cur_transfer->rx_dma)
		dws->rx_dma = dws->rx_dma_init;

	if (dws->tx)
		dws->state |= TXBUSY;
	if (dws->rx)
		dws->state |= RXBUSY;

	switch (dws->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dws->dma_chan_done = 0;

	if (dws->tx)
		txchan = dws->txchan;

	if (dws->rx)
		rxchan = dws->rxchan;

	if (dws->tx) {
		/* 2. Prepare the TX dma transfer */
		txconf.direction = DMA_MEM_TO_DEV;
		txconf.dst_addr = dws->tx_dma_addr;
		txconf.dst_maxburst = dws->dma_width;
		txconf.dst_addr_width = width;

		ret = dmaengine_slave_config(txchan, &txconf);
		if (ret) {
			dev_warn(dws->parent_dev, "TX DMA slave config failed\n");
			return -1;
		}

		memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
		dws->tx_sgl.dma_address = dws->tx_dma;
		dws->tx_sgl.length = dws->len;

		txdesc = dmaengine_prep_slave_sg(txchan,
					&dws->tx_sgl,
					1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
		if (!txdesc) {
			dev_warn(dws->parent_dev, "failed to prepare TX DMA descriptor\n");
			return -1;
		}

		txdesc->callback = dw_spi_dma_txcb;
		txdesc->callback_param = dws;

		DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%d,burst=%d,width=%d\n",
			__func__, (int *)dws->tx_dma_addr, (int *)dws->tx_dma,
			dws->len, dws->dma_width, width);
	}

	if (dws->rx) {
		/* 3. Prepare the RX dma transfer */
		rxconf.direction = DMA_DEV_TO_MEM;
		rxconf.src_addr = dws->rx_dma_addr;
		rxconf.src_maxburst = dws->dma_width;
		rxconf.src_addr_width = width;

		ret = dmaengine_slave_config(rxchan, &rxconf);
		if (ret) {
			dev_warn(dws->parent_dev, "RX DMA slave config failed\n");
			return -1;
		}

		memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
		dws->rx_sgl.dma_address = dws->rx_dma;
		dws->rx_sgl.length = dws->len;

		rxdesc = dmaengine_prep_slave_sg(rxchan,
					&dws->rx_sgl,
					1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
		if (!rxdesc) {
			dev_warn(dws->parent_dev, "failed to prepare RX DMA descriptor\n");
			return -1;
		}

		rxdesc->callback = dw_spi_dma_rxcb;
		rxdesc->callback_param = dws;

		DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%d,burst=%d,width=%d\n",
			__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma,
			dws->len, dws->dma_width, width);
	}

	/* RX must be started before TX: SPI clocks data in while it clocks data out */
	if (dws->rx) {
		dws->rx_cookie = dmaengine_submit(rxdesc);
		dma_sync_single_for_device(rxchan->device->dev, dws->rx_dma,
					   dws->len, DMA_FROM_DEVICE);
		dma_async_issue_pending(rxchan);

		DBG_SPI("%s:rx end\n", __func__);
	}

	if (dws->tx) {
		dws->tx_cookie = dmaengine_submit(txdesc);
		dma_sync_single_for_device(txchan->device->dev, dws->tx_dma,
					   dws->len, DMA_TO_DEVICE);
		dma_async_issue_pending(txchan);

		DBG_SPI("%s:tx end\n", __func__);
	}

	return 0;
}

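/* DMA operations registered with the core SPI driver. */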
static struct dw_spi_dma_ops spi_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_transfer	= mid_spi_dma_transfer,
};

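/*
 * Allocate the per-controller DMA state and the coherent TX/RX bounce
 * buffers, and hook the DMA operations into the core driver.
 */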
int dw_spi_dma_init(struct dw_spi *dws)
{
	DBG_SPI("%s:start\n", __func__);

	dws->dma_priv = kzalloc(sizeof(struct spi_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &spi_dma_ops;

	dws->tx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
					    &dws->tx_dma_init, GFP_KERNEL | GFP_DMA);
	if (!dws->tx_buffer) {
		dev_err(dws->parent_dev, "failed to allocate DMA TX buffer\n");
		goto err_free_priv;
	}

	dws->rx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
					    &dws->rx_dma_init, GFP_KERNEL | GFP_DMA);
	if (!dws->rx_buffer) {
		dev_err(dws->parent_dev, "failed to allocate DMA RX buffer\n");
		goto err_free_tx;
	}

	memset(dws->tx_buffer, 0, DMA_BUFFER_SIZE);
	memset(dws->rx_buffer, 0, DMA_BUFFER_SIZE);

	dws->state = 0;

	init_completion(&dws->xfer_completion);

	return 0;

err_free_tx:
	dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
			  dws->tx_buffer, dws->tx_dma_init);
err_free_priv:
	kfree(dws->dma_priv);
	return -ENOMEM;
}
#endif /* CONFIG_SPI_ROCKCHIP_DMA */