2 #include <linux/module.h>
\r
3 #include <linux/kernel.h>
\r
4 #include <linux/slab.h>
\r
5 #include <linux/dma-mapping.h>
\r
6 #include <linux/irq.h>
\r
7 #include <linux/interrupt.h>
\r
8 #include <linux/bootmem.h>
\r
10 #include <asm/cacheflush.h>
\r
11 #include <linux/platform_device.h>
\r
12 #include <linux/semaphore.h>
\r
13 #include <linux/clk.h>
\r
15 #define RKNAND_VERSION_AND_DATE "rknandbase v1.0 2014-03-31"
\r
18 #include <linux/of.h>
\r
/*
 * Shared state handed to the (separately built) rknand FTL module via
 * rk_nand_get_device(); the FTL fills in the function-pointer slots.
 *
 * NOTE(review): this definition was reconstructed from a garbled copy; the
 * field order and the reserved padding are part of the ABI with the FTL
 * module — confirm the exact layout against version control before use.
 */
struct rknand_info {
	int tag;			/* TODO confirm: line lost in extraction */
	int enable;			/* TODO confirm: line lost in extraction */
	int clk_rate[2];		/* per-NANDC clock rate, filled by rknand_probe() */
	int nand_suspend_state;		/* 1 while suspended (rknand_suspend/resume) */
	int nand_shutdown_state;	/* 1 once buffers were flushed for shutdown */
	int reserved0[6];		/* TODO confirm: padding lost in extraction */

	/* power-management / lifecycle hooks installed by the FTL module */
	void (*rknand_suspend)(void);
	void (*rknand_resume)(void);
	void (*rknand_buffer_shutdown)(void);
	int (*rknand_exit)(void);

	/* sector-level I/O entry points installed by the FTL module */
	int (*ftl_read)(int lun, int Index, int nSec, void *buf);
	int (*ftl_write)(int lun, int Index, int nSec, void *buf);
	void (*nand_timing_config)(unsigned long AHBnKHz);
	void (*rknand_dev_cache_flush)(void);

	int reserved1[16];		/* TODO confirm: padding lost in extraction */
};
42 struct rk_nandc_info
\r
45 void __iomem * reg_base ;
\r
48 struct clk *clk; // flash clk
\r
49 struct clk *hclk; // nandc clk
\r
50 struct clk *gclk; // flash clk gate
\r
53 struct rknand_info * gpNandInfo = NULL;
\r
54 static struct rk_nandc_info g_nandc_info[2];
\r
56 static char *cmdline=NULL;
\r
57 int rknand_get_part_info(char **s)
\r
62 EXPORT_SYMBOL(rknand_get_part_info);
\r
/* Raw 512-byte copies of the serial-number and vendor0 sectors, captured
 * from the NANDC0 register window in rknand_probe() before the FTL is up. */
static char sn_data[512];
static char vendor0[512];
67 char GetSNSectorInfo(char * pbuf)
\r
69 memcpy(pbuf,sn_data,0x200);
\r
73 char GetSNSectorInfoBeforeNandInit(char * pbuf)
\r
75 memcpy(pbuf,sn_data,0x200);
\r
79 char GetVendor0InfoBeforeNandInit(char * pbuf)
\r
81 memcpy(pbuf,vendor0 + 8,504);
\r
/*
 * GetParamterInfo(): presumably copies up to `len` bytes of the boot
 * "parameter" block into pbuf — confirm against callers.
 * NOTE(review): the function body (original lines 86-90) was lost in
 * extraction; restore it from version control.  Only the signature survives.
 */
85 int GetParamterInfo(char * pbuf , int len)

91 void rknand_spin_lock_init(spinlock_t * p_lock)
\r
93 spin_lock_init(p_lock);
\r
95 EXPORT_SYMBOL(rknand_spin_lock_init);
\r
97 void rknand_spin_lock(spinlock_t * p_lock)
\r
99 spin_lock_irq(p_lock);
\r
101 EXPORT_SYMBOL(rknand_spin_lock);
\r
103 void rknand_spin_unlock(spinlock_t * p_lock)
\r
105 spin_unlock_irq(p_lock);
\r
107 EXPORT_SYMBOL(rknand_spin_unlock);
\r
110 struct semaphore g_rk_nand_ops_mutex;
\r
111 void rknand_device_lock_init(void)
\r
113 sema_init(&g_rk_nand_ops_mutex, 1);
\r
115 EXPORT_SYMBOL(rknand_device_lock_init);
\r
116 void rknand_device_lock (void)
\r
118 down(&g_rk_nand_ops_mutex);
\r
120 EXPORT_SYMBOL(rknand_device_lock);
\r
122 int rknand_device_trylock (void)
\r
124 return down_trylock(&g_rk_nand_ops_mutex);
\r
126 EXPORT_SYMBOL(rknand_device_trylock);
\r
128 void rknand_device_unlock (void)
\r
130 up(&g_rk_nand_ops_mutex);
\r
132 EXPORT_SYMBOL(rknand_device_unlock);
\r
135 int rk_nand_get_device(struct rknand_info ** prknand_Info)
\r
137 *prknand_Info = gpNandInfo;
\r
140 EXPORT_SYMBOL(rk_nand_get_device);
\r
/*
 * Flush the D-cache for the buffer at `ptr` and return its physical address
 * for programming into the NANDC DMA engine.
 * size + 63: presumably widens the flush to cover a full cache line at the
 * tail — TODO confirm cache-line assumption.  `dir` is accepted for symmetry
 * with the map/unmap helpers but unused here.
 */
unsigned long rknand_dma_flush_dcache(unsigned long ptr, int size, int dir)
{
	__cpuc_flush_dcache_area((void *)ptr, size + 63);
	return (unsigned long)virt_to_phys((void *)ptr);
}
EXPORT_SYMBOL(rknand_dma_flush_dcache);
149 unsigned long rknand_dma_map_single(unsigned long ptr,int size,int dir)
\r
151 return dma_map_single(NULL,(void*)ptr,size, dir?DMA_TO_DEVICE:DMA_FROM_DEVICE);
\r
153 EXPORT_SYMBOL(rknand_dma_map_single);
\r
155 void rknand_dma_unmap_single(unsigned long ptr,int size,int dir)
\r
157 dma_unmap_single(NULL, (dma_addr_t)ptr,size, dir?DMA_TO_DEVICE:DMA_FROM_DEVICE);
\r
159 EXPORT_SYMBOL(rknand_dma_unmap_single);
\r
/*
 * Per-chip-select init hook for controller `id`.
 * NOTE(review): the body (original lines 162-164) was lost in extraction;
 * restored as the upstream no-op (pin muxing is handled elsewhere) —
 * confirm against version control.
 */
int rknand_flash_cs_init(int id)
{
	return 0;
}
EXPORT_SYMBOL(rknand_flash_cs_init);
167 int rknand_get_reg_addr(int *pNandc0,int *pNandc1,int *pSDMMC0,int *pSDMMC1,int *pSDMMC2)
\r
169 *pNandc0 = (int)g_nandc_info[0].reg_base;
\r
170 *pNandc1 = (int)g_nandc_info[1].reg_base;
\r
173 EXPORT_SYMBOL(rknand_get_reg_addr);
\r
175 int rknand_nandc_irq_init(int id,int mode,void * pfun)
\r
178 int irq= g_nandc_info[id].irq;
\r
182 ret = request_irq(irq, pfun, 0, "nandc", g_nandc_info[id].reg_base);
\r
184 //printk("request IRQ_NANDC %x irq %x, ret=%x.........\n",id,irq, ret);
\r
188 free_irq(irq, NULL);
\r
192 EXPORT_SYMBOL(rknand_nandc_irq_init);
\r
194 static int rknand_probe(struct platform_device *pdev)
\r
196 unsigned int id = 0;
\r
198 struct resource *mem;
\r
199 void __iomem *membase;
\r
201 if(gpNandInfo == NULL)
\r
203 gpNandInfo = kzalloc(sizeof(struct rknand_info), GFP_KERNEL);
\r
206 gpNandInfo->nand_suspend_state = 0;
\r
207 gpNandInfo->nand_shutdown_state = 0;
\r
209 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
\r
210 membase = devm_request_and_ioremap(&pdev->dev, mem);
\r
213 dev_err(&pdev->dev, "no reg resource?\n");
\r
216 //printk("rknand_probe %d %x %x\n", pdev->id,(int)mem,(int)membase);
\r
218 if(0==of_property_read_u32(pdev->dev.of_node, "nandc_id", &id))
\r
226 memcpy(vendor0,membase+0x1400,0x200);
\r
227 memcpy(sn_data,membase+0x1600,0x200);
\r
231 dev_err(&pdev->dev, "nandc id = %d error!\n",id);
\r
234 irq = platform_get_irq(pdev, 0);
\r
235 //printk("nand irq: %d\n",irq);
\r
237 dev_err(&pdev->dev, "no irq resource?\n");
\r
240 g_nandc_info[id].id = id;
\r
241 g_nandc_info[id].irq = irq;
\r
242 g_nandc_info[id].reg_base = membase;
\r
244 g_nandc_info[id].hclk = devm_clk_get(&pdev->dev, "hclk_nandc");
\r
245 g_nandc_info[id].clk = devm_clk_get(&pdev->dev, "clk_nandc");
\r
246 g_nandc_info[id].gclk = devm_clk_get(&pdev->dev, "g_clk_nandc");
\r
248 if (unlikely(IS_ERR(g_nandc_info[id].clk)) || unlikely(IS_ERR(g_nandc_info[id].hclk))
\r
249 || unlikely(IS_ERR(g_nandc_info[id].gclk))) {
\r
250 printk("rknand_probe get clk error\n");
\r
254 clk_set_rate(g_nandc_info[id].clk,150*1000*1000);
\r
255 g_nandc_info[id].clk_rate = clk_get_rate(g_nandc_info[id].clk );
\r
256 printk("rknand_probe clk rate = %d\n",g_nandc_info[id].clk_rate);
\r
257 gpNandInfo->clk_rate[id] = g_nandc_info[id].clk_rate;
\r
259 clk_prepare_enable( g_nandc_info[id].clk );
\r
260 clk_prepare_enable( g_nandc_info[id].hclk);
\r
261 clk_prepare_enable( g_nandc_info[id].gclk);
\r
265 static int rknand_suspend(struct platform_device *pdev, pm_message_t state)
\r
267 if(gpNandInfo->rknand_suspend && gpNandInfo->nand_suspend_state == 0){
\r
268 gpNandInfo->nand_suspend_state = 1;
\r
269 gpNandInfo->rknand_suspend();
\r
270 //TODO:nandc clk disable
\r
275 static int rknand_resume(struct platform_device *pdev)
\r
277 if(gpNandInfo->rknand_resume && gpNandInfo->nand_suspend_state == 1){
\r
278 gpNandInfo->nand_suspend_state = 0;
\r
279 //TODO:nandc clk enable
\r
280 gpNandInfo->rknand_resume();
\r
285 static void rknand_shutdown(struct platform_device *pdev)
\r
287 if(gpNandInfo->rknand_buffer_shutdown && gpNandInfo->nand_shutdown_state == 0){
\r
288 gpNandInfo->nand_shutdown_state = 1;
\r
289 gpNandInfo->rknand_buffer_shutdown();
\r
293 void rknand_dev_cache_flush(void)
\r
295 if(gpNandInfo->rknand_dev_cache_flush)
\r
296 gpNandInfo->rknand_dev_cache_flush();
\r
300 static const struct of_device_id of_rk_nandc_match[] = {
\r
301 { .compatible = "rockchip,rk-nandc" },
\r
306 static struct platform_driver rknand_driver = {
\r
307 .probe = rknand_probe,
\r
308 .suspend = rknand_suspend,
\r
309 .resume = rknand_resume,
\r
310 .shutdown = rknand_shutdown,
\r
314 .of_match_table = of_rk_nandc_match,
\r
316 .owner = THIS_MODULE,
\r
320 static void __exit rknand_part_exit(void)
\r
322 printk("rknand_part_exit: \n");
\r
323 platform_driver_unregister(&rknand_driver);
\r
324 if(gpNandInfo->rknand_exit)
\r
325 gpNandInfo->rknand_exit();
\r
330 MODULE_ALIAS(DRIVER_NAME);
\r
331 static int __init rknand_part_init(void)
\r
334 printk("%s\n", RKNAND_VERSION_AND_DATE);
\r
336 cmdline = strstr(saved_command_line, "mtdparts=") + 9;
\r
339 memset(g_nandc_info,0,sizeof(g_nandc_info));
\r
341 ret = platform_driver_register(&rknand_driver);
\r
342 printk("rknand_driver:ret = %x \n",ret);
\r
346 module_init(rknand_part_init);
\r
347 module_exit(rknand_part_exit);
\r