drivers/spi/spi-rockchip-dma.c
/*
 * DMA support for the DesignWare SPI core on Rockchip platforms
 * (based on the DW core DMA handling for the Intel MID platform).
 *
 * Copyright (c) 2009, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/platform_data/spi-rockchip.h>

#include "spi-rockchip-core.h"

#ifdef CONFIG_SPI_ROCKCHIP_DMA
#define DMA_BUFFER_SIZE	(PAGE_SIZE << 4)

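/*
 * Per-direction slave channel bookkeeping; an instance for each of TX and
 * RX is stored in the corresponding channel's private pointer.
 */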
struct spi_dma_slave {
	struct dma_chan *ch;
	enum dma_transfer_direction direction;
	unsigned int dmach;
};

struct spi_dma {
	struct spi_dma_slave	dmas_tx;
	struct spi_dma_slave	dmas_rx;
};

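/* Dump a transfer buffer as a comma-separated hex byte list (debug only). */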
static void printk_transfer_data(struct dw_spi *dws, char *buf, int len)
{
	int i;

	for (i = 0; i < len; i++)
		DBG_SPI("0x%02x,", *buf++);

	DBG_SPI("\n");
}

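/*
 * Acquire the "rx" and "tx" slave channels bound to this controller and
 * attach the per-direction state to each channel.
 */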
static int mid_spi_dma_init(struct dw_spi *dws)
{
	struct spi_dma *dw_dma = dws->dma_priv;
	struct spi_dma_slave *rxs, *txs;

	DBG_SPI("%s:start\n", __func__);

	/* 1. Init rx channel */
	dws->rxchan = dma_request_slave_channel(dws->parent_dev, "rx");
	if (!dws->rxchan) {
		dev_err(dws->parent_dev, "Failed to get RX DMA channel\n");
		goto err_exit;
	}

	DBG_SPI("%s:rx_chan_id=%d\n", __func__, dws->rxchan->chan_id);

	rxs = &dw_dma->dmas_rx;
	dws->rxchan->private = rxs;

	/* 2. Init tx channel */
	dws->txchan = dma_request_slave_channel(dws->parent_dev, "tx");
	if (!dws->txchan) {
		dev_err(dws->parent_dev, "Failed to get TX DMA channel\n");
		goto free_rxchan;
	}
	txs = &dw_dma->dmas_tx;
	dws->txchan->private = txs;

	DBG_SPI("%s:tx_chan_id=%d\n", __func__, dws->txchan->chan_id);

	dws->dma_inited = 1;

	DBG_SPI("%s:line=%d\n", __func__, __LINE__);
	return 0;

free_rxchan:
	dma_release_channel(dws->rxchan);
err_exit:
	return -EBUSY;
}

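/* Release the channels acquired in mid_spi_dma_init(). */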
static void mid_spi_dma_exit(struct dw_spi *dws)
{
	DBG_SPI("%s:start\n", __func__);
	dma_release_channel(dws->txchan);
	dma_release_channel(dws->rxchan);
}

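/*
 * RX completion callback, invoked by the DMA engine when the receive
 * descriptor finishes: copy the data out of the RX bounce buffer into the
 * caller's rx_buf and, if TX is also done, signal end of transfer.
 */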
static void dw_spi_dma_rxcb(void *arg)
{
	struct dw_spi *dws = arg;
	unsigned long flags;
	struct dma_tx_state state;
	int dma_status;

	/* Hand the buffer back to the CPU before reading it. */
	dma_sync_single_for_cpu(dws->rxchan->device->dev, dws->rx_dma,
				dws->len, DMA_FROM_DEVICE);

	dma_status = dmaengine_tx_status(dws->rxchan, dws->rx_cookie, &state);

	DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);

	spin_lock_irqsave(&dws->lock, flags);
	if (dma_status == DMA_COMPLETE)
		dws->state &= ~RXBUSY;
	else
		dev_err(&dws->master->dev, "error:rx dma_status=%x\n", dma_status);

	/* Copy the received data from the DMA buffer to the transfer buffer. */
	if (dws->cur_transfer && (dws->cur_transfer->rx_buf != NULL)) {
		memcpy(dws->cur_transfer->rx_buf, dws->rx_buffer, dws->cur_transfer->len);

		DBG_SPI("dma rx:");
		printk_transfer_data(dws, dws->cur_transfer->rx_buf, dws->cur_transfer->len);
	}

	spin_unlock_irqrestore(&dws->lock, flags);

	/* If the TX side is already done, the whole transfer is complete. */
	if (!(dws->state & TXBUSY)) {
		dw_spi_xfer_done(dws);
		complete(&dws->xfer_completion);
		DBG_SPI("%s:complete\n", __func__);
	}
}

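/*
 * TX completion callback: clear TXBUSY and, if RX is also done, signal
 * end of transfer.
 */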
static void dw_spi_dma_txcb(void *arg)
{
	struct dw_spi *dws = arg;
	unsigned long flags;
	struct dma_tx_state state;
	int dma_status;

	dma_sync_single_for_device(dws->txchan->device->dev, dws->tx_dma,
				   dws->len, DMA_TO_DEVICE);

	dma_status = dmaengine_tx_status(dws->txchan, dws->tx_cookie, &state);

	DBG_SPI("%s:dma_status=0x%x\n", __func__, dma_status);
	DBG_SPI("dma tx:");
	printk_transfer_data(dws, (char *)dws->cur_transfer->tx_buf, dws->cur_transfer->len);

	spin_lock_irqsave(&dws->lock, flags);

	if (dma_status == DMA_COMPLETE)
		dws->state &= ~TXBUSY;
	else
		dev_err(&dws->master->dev, "error:tx dma_status=%x\n", dma_status);

	spin_unlock_irqrestore(&dws->lock, flags);

	/* If the RX side is already done, the whole transfer is complete. */
	if (!(dws->state & RXBUSY)) {
		dw_spi_xfer_done(dws);
		complete(&dws->xfer_completion);
		DBG_SPI("%s:complete\n", __func__);
	}
}

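/*
 * Configure both slave channels, build a single-entry scatterlist for each
 * direction and submit the descriptors; RX is issued before TX because the
 * controller clocks data in and out at the same time.
 */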
static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
{
	struct dma_async_tx_descriptor *txdesc = NULL, *rxdesc = NULL;
	struct dma_chan *txchan = NULL, *rxchan = NULL;
	struct dma_slave_config txconf, rxconf;
	int ret = 0;
	enum dma_slave_buswidth width;

	DBG_SPI("%s:cs_change=%d\n", __func__, cs_change);

	/*
	 * 1. Fall back to the driver's own bounce buffers when the transfer
	 * was not mapped by the caller (tx_dma/rx_dma left NULL).
	 */
	if ((dws->cur_transfer->tx_buf) && dws->dma_mapped && (!dws->cur_transfer->tx_dma)) {
		memcpy(dws->tx_buffer, dws->cur_transfer->tx_buf, dws->cur_transfer->len);
		dws->tx_dma = dws->tx_dma_init;
	}

	if ((dws->cur_transfer->rx_buf) && dws->dma_mapped && (!dws->cur_transfer->rx_dma))
		dws->rx_dma = dws->rx_dma_init;

	if (dws->tx)
		dws->state |= TXBUSY;
	if (dws->rx)
		dws->state |= RXBUSY;

	switch (dws->n_bytes) {
	case 1:
		width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		break;
	case 2:
		width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		break;
	default:
		width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		break;
	}

	dws->dma_chan_done = 0;

	if (dws->tx)
		txchan = dws->txchan;
	if (dws->rx)
		rxchan = dws->rxchan;

	if (dws->tx) {
		/* 2. Prepare the TX dma transfer */
		txconf.direction = DMA_MEM_TO_DEV;
		txconf.dst_addr = dws->tx_dma_addr;
		txconf.dst_maxburst = dws->dmatdlr;
		txconf.dst_addr_width = width;

		ret = dmaengine_slave_config(txchan, &txconf);
		if (ret) {
			dev_warn(dws->parent_dev, "TX DMA slave config failed\n");
			return -1;
		}

		memset(&dws->tx_sgl, 0, sizeof(dws->tx_sgl));
		dws->tx_sgl.dma_address = dws->tx_dma;
		dws->tx_sgl.length = dws->len;

		txdesc = dmaengine_prep_slave_sg(txchan,
					&dws->tx_sgl,
					1,
					DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT);
		if (!txdesc) {
			dev_warn(dws->parent_dev, "failed to prepare TX descriptor\n");
			return -1;
		}

		txdesc->callback = dw_spi_dma_txcb;
		txdesc->callback_param = dws;

		DBG_SPI("%s:dst_addr=0x%p,tx_dma=0x%p,len=%ld,burst=%d,width=%d\n",
			__func__, (int *)dws->tx_dma_addr, (int *)dws->tx_dma,
			(long)dws->len, txconf.dst_maxburst, width);
	}

	if (dws->rx) {
		/* 3. Prepare the RX dma transfer */
		rxconf.direction = DMA_DEV_TO_MEM;
		rxconf.src_addr = dws->rx_dma_addr;
		rxconf.src_maxburst = dws->dmardlr + 1;
		rxconf.src_addr_width = width;

		ret = dmaengine_slave_config(rxchan, &rxconf);
		if (ret) {
			dev_warn(dws->parent_dev, "RX DMA slave config failed\n");
			return -1;
		}

		memset(&dws->rx_sgl, 0, sizeof(dws->rx_sgl));
		dws->rx_sgl.dma_address = dws->rx_dma;
		dws->rx_sgl.length = dws->len;

		rxdesc = dmaengine_prep_slave_sg(rxchan,
					&dws->rx_sgl,
					1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT);
		if (!rxdesc) {
			dev_warn(dws->parent_dev, "failed to prepare RX descriptor\n");
			return -1;
		}

		rxdesc->callback = dw_spi_dma_rxcb;
		rxdesc->callback_param = dws;

		DBG_SPI("%s:src_addr=0x%p,rx_dma=0x%p,len=%ld,burst=%d,width=%d\n",
			__func__, (int *)dws->rx_dma_addr, (int *)dws->rx_dma,
			(long)dws->len, rxconf.src_maxburst, width);
	}

	if (!dws->tx)
		spi_enable_chip(dws, 1);

	/* RX must be started before TX: SPI shifts data in and out
	 * simultaneously, so the receive side has to be ready first. */
	if (dws->rx) {
		dws->rx_cookie = dmaengine_submit(rxdesc);
		dma_sync_single_for_device(rxchan->device->dev, dws->rx_dma,
					   dws->len, DMA_FROM_DEVICE);
		dma_async_issue_pending(rxchan);

		DBG_SPI("%s:rx end\n", __func__);
	}

	if (dws->tx) {
		dws->tx_cookie = dmaengine_submit(txdesc);
		dma_sync_single_for_device(txchan->device->dev, dws->tx_dma,
					   dws->len, DMA_TO_DEVICE);
		dma_async_issue_pending(txchan);

		DBG_SPI("%s:tx end\n", __func__);
	}

	return 0;
}

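/* DMA callbacks exported to the core driver through dws->dma_ops. */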
static struct dw_spi_dma_ops spi_dma_ops = {
	.dma_init	= mid_spi_dma_init,
	.dma_exit	= mid_spi_dma_exit,
	.dma_transfer	= mid_spi_dma_transfer,
};

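/*
 * Public entry point: allocate the private DMA state and the coherent
 * TX/RX bounce buffers, then install the DMA ops on the controller.
 */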
int dw_spi_dma_init(struct dw_spi *dws)
{
	DBG_SPI("%s:start\n", __func__);

	dws->dma_priv = kzalloc(sizeof(struct spi_dma), GFP_KERNEL);
	if (!dws->dma_priv)
		return -ENOMEM;
	dws->dma_ops = &spi_dma_ops;

	dws->tx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
					    &dws->tx_dma_init, GFP_KERNEL | GFP_DMA);
	if (!dws->tx_buffer) {
		dev_err(dws->parent_dev, "failed to allocate DMA TX buffer\n");
		kfree(dws->dma_priv);
		return -ENOMEM;
	}

	dws->rx_buffer = dma_alloc_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
					    &dws->rx_dma_init, GFP_KERNEL | GFP_DMA);
	if (!dws->rx_buffer) {
		dev_err(dws->parent_dev, "failed to allocate DMA RX buffer\n");
		dma_free_coherent(dws->parent_dev, DMA_BUFFER_SIZE,
				  dws->tx_buffer, dws->tx_dma_init);
		kfree(dws->dma_priv);
		return -ENOMEM;
	}

	memset(dws->tx_buffer, 0, DMA_BUFFER_SIZE);
	memset(dws->rx_buffer, 0, DMA_BUFFER_SIZE);

	dws->state = 0;

	init_completion(&dws->xfer_completion);

	return 0;
}
#endif /* CONFIG_SPI_ROCKCHIP_DMA */