/*
 * drivers/rk_nand/rk_nand_blk.c
 *
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/semaphore.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/version.h>

#include "rk_nand_blk.h"
#include "rk_ftl_api.h"

static struct nand_part disk_array[MAX_PART_COUNT];
static int g_max_part_num = 4;

#define PART_READONLY 0x85
#define PART_WRITEONLY 0x86
#define PART_NO_ACCESS 0x87

static unsigned long totle_read_data;
static unsigned long totle_write_data;
static unsigned long totle_read_count;
static unsigned long totle_write_count;
static int rk_nand_dev_initialised;

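/*
 * Bounce buffer used for read requests: when a request's bio segments are
 * not physically contiguous, the data is first read into this buffer with a
 * single FTL call and then copied back into the individual segments.  It is
 * sized for MTD_RW_SECTORS 512-byte sectors and allocated in
 * nand_blk_register().
 */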
static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)

static int rknand_proc_show(struct seq_file *m, void *v)
{
	m->count = rknand_proc_ftlread(m->buf);
	seq_printf(m, "Total Read %lu KB\n", totle_read_data >> 1);
	seq_printf(m, "Total Write %lu KB\n", totle_write_data >> 1);
	seq_printf(m, "total_write_count %lu\n", totle_write_count);
	seq_printf(m, "total_read_count %lu\n", totle_read_count);
	return 0;
}

static int rknand_mtd_proc_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "%s", "dev:    size   erasesize  name\n");
	for (i = 0; i < g_max_part_num; i++) {
		seq_printf(m, "rknand%d: %8.8llx %8.8x \"%s\"\n", i,
			   (unsigned long long)disk_array[i].size * 512,
			   32 * 0x200, disk_array[i].name);
	}
	return 0;
}

static int rknand_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_proc_show, PDE_DATA(inode));
}

static int rknand_mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_mtd_proc_show, PDE_DATA(inode));
}

static const struct file_operations rknand_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rknand_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations rknand_mtd_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= rknand_mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

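/*
 * Debug entries: /proc/rknand dumps FTL statistics plus the read/write
 * counters kept above, and /proc/mtd exports an mtd-style partition table
 * (device, size, erasesize, name) so tools that expect /proc/mtd keep
 * working even though the MTD subsystem is not involved.
 */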
static int rknand_create_procfs(void)
{
	struct proc_dir_entry *ent;

	ent = proc_create_data("rknand", 0444, NULL, &rknand_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;

	ent = proc_create_data("mtd", 0444, NULL, &rknand_mtd_proc_fops,
			       (void *)0);
	if (!ent)
		return -1;
	return 0;
}

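/*
 * Single global lock serializing every call into the FTL library: reads,
 * writes, discard, cache flush and garbage collection all run under it.
 * rknand_dev_suspend() deliberately leaves the mutex held until
 * rknand_dev_resume() releases it, so no FTL operation can start while the
 * controller is suspended.
 */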
static struct mutex g_rk_nand_ops_mutex;

static void rknand_device_lock_init(void)
{
	mutex_init(&g_rk_nand_ops_mutex);
}

void rknand_device_lock(void)
{
	mutex_lock(&g_rk_nand_ops_mutex);
}

int rknand_device_trylock(void)
{
	return mutex_trylock(&g_rk_nand_ops_mutex);
}

void rknand_device_unlock(void)
{
	mutex_unlock(&g_rk_nand_ops_mutex);
}

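/*
 * Issue one contiguous transfer on behalf of the block layer.  'start' and
 * 'nsector' are in 512-byte sectors relative to the partition; the partition
 * offset is added before the request is handed to FtlRead()/FtlWrite()
 * (provided by the binary FTL library, which is assumed to return non-zero
 * on failure).  The per-device access restrictions, set from the partition
 * flags or the ioctls below, are enforced here.
 */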
static int nand_dev_transfer(struct nand_blk_dev *dev,
			     unsigned long start,
			     unsigned long nsector,
			     char *buf,
			     int cmd,
			     int totle_nsec)
{
	int ret;

	if (dev->disable_access ||
	    ((cmd == WRITE) && dev->readonly) ||
	    ((cmd == READ) && dev->writeonly)) {
		return -EIO;
	}

	start += dev->off_size;
	rknand_device_lock();

	switch (cmd) {
	case READ:
		totle_read_data += nsector;
		totle_read_count++;
		ret = FtlRead(0, start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	case WRITE:
		totle_write_data += nsector;
		totle_write_count++;
		ret = FtlWrite(0, start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;

	default:
		ret = -EIO;
		break;
	}

	rknand_device_unlock();
	return ret;
}

void rknand_queue_cond_resched(void)
{
}

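/*
 * Idle-time garbage collection: the worker thread polls with a timeout and,
 * when no requests are queued, calls rk_ftl_garbage_collect().  The timer
 * below periodically bumps rk_ftl_gc_do and re-arms itself with a growing
 * interval; incoming block requests reset the pace via nand_blk_request().
 */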
static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
static void rk_ftl_gc_timeout_hack(unsigned long data);
static DEFINE_TIMER(rk_ftl_gc_timeout, rk_ftl_gc_timeout_hack, 0, 0);
static unsigned long rk_ftl_gc_jiffies;
static unsigned long rk_ftl_gc_do;

static void rk_ftl_gc_timeout_hack(unsigned long data)
{
	del_timer(&rk_ftl_gc_timeout);
	rk_ftl_gc_do++;
	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies * rk_ftl_gc_do;
	add_timer(&rk_ftl_gc_timeout);
}

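/*
 * Check whether all bio segments of a request are physically contiguous.
 * If they are, *pbuf is set to the start of the run and the caller can hand
 * the request to the FTL as a single transfer without the bounce buffer;
 * returns 1 when contiguous, 0 otherwise.
 */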
static int req_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	void *firstbuf = 0;
	char *nextbuffer = 0;
	unsigned long block, nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	rq_for_each_segment(bv, req, iter) {
		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (firstbuf == 0)
			firstbuf = buffer;
		nr_vec++;
		if (nextbuffer != 0 && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuf;
	return 1;
}

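/*
 * Worker thread that drains the request queue.  Requests are fetched under
 * the queue lock and completed with __blk_end_request_*(); DISCARD and
 * FLUSH are mapped to FtlDiscard() and rk_ftl_cache_write_back().  Reads go
 * through the bounce buffer unless the segments are contiguous; writes are
 * issued run by run as contiguous segments are found.  While the queue is
 * empty the thread performs background garbage collection and periodic
 * cache write-back.
 */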
static int nand_blktrans_thread(void *arg)
{
	struct nand_blk_ops *nandr = arg;
	struct request_queue *rq = nandr->rq;
	struct request *req = NULL;
	int ftl_gc_status = 0;
	char *buf;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	unsigned long long sector_index = ULLONG_MAX;
	unsigned long totle_nsect;
	unsigned long rq_len = 0;
	int rw_flag = 0;
	int req_empty_times = 0;

	spin_lock_irq(rq->queue_lock);
	rk_ftl_gc_jiffies = HZ * 5;
	rk_ftl_gc_do = 0;
	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies;
	add_timer(&rk_ftl_gc_timeout);

	while (!nandr->quit) {
		int res;
		struct nand_blk_dev *dev;
		DECLARE_WAITQUEUE(wait, current);

		if (!req)
			req = blk_fetch_request(rq);
		if (!req) {
			add_wait_queue(&nandr->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			if (rknand_device_trylock()) {
				ftl_gc_status = rk_ftl_garbage_collect(1, 0);
				rknand_device_unlock();
				rk_ftl_gc_jiffies = HZ / 50;
				if (ftl_gc_status == 0) {
					rk_ftl_gc_jiffies = 1 * HZ;
				} else if (ftl_gc_status < 8) {
					spin_lock_irq(rq->queue_lock);
					remove_wait_queue(&nandr->thread_wq,
							  &wait);
					continue;
				}
			} else {
				rk_ftl_gc_jiffies = HZ / 50;
			}
			req_empty_times++;
			if (req_empty_times < 10)
				rk_ftl_gc_jiffies = HZ / 50;
			/* 100ms cache write back */
			if (req_empty_times >= 5 && req_empty_times < 7) {
				rknand_device_lock();
				rk_ftl_cache_write_back();
				rknand_device_unlock();
			}
			wait_event_timeout(nandr->thread_wq,
					   rk_ftl_gc_do || nandr->quit,
					   rk_ftl_gc_jiffies);
			rk_ftl_gc_do = 0;
			spin_lock_irq(rq->queue_lock);
			remove_wait_queue(&nandr->thread_wq, &wait);
			continue;
		} else {
			rk_ftl_gc_jiffies = 1 * HZ;
			req_empty_times = 0;
		}

		dev = req->rq_disk->private_data;
		totle_nsect = blk_rq_bytes(req) >> 9;
		sector_index = blk_rq_pos(req);
		rq_len = 0;
		buf = 0;
		res = 0;

		if (req->cmd_flags & REQ_DISCARD) {
			spin_unlock_irq(rq->queue_lock);
			rknand_device_lock();
			if (FtlDiscard(blk_rq_pos(req) +
				       dev->off_size, totle_nsect))
				res = -EIO;
			rknand_device_unlock();
			spin_lock_irq(rq->queue_lock);
			if (!__blk_end_request_cur(req, res))
				req = NULL;
			continue;
		} else if (req->cmd_flags & REQ_FLUSH) {
			spin_unlock_irq(rq->queue_lock);
			rknand_device_lock();
			rk_ftl_cache_write_back();
			rknand_device_unlock();
			spin_lock_irq(rq->queue_lock);
			if (!__blk_end_request_cur(req, res))
				req = NULL;
			continue;
		}

		rw_flag = req->cmd_flags & REQ_WRITE;
		if (rw_flag == READ && mtd_read_temp_buffer) {
			buf = mtd_read_temp_buffer;
			req_check_buffer_align(req, &buf);
			spin_unlock_irq(rq->queue_lock);
			res = nand_dev_transfer(dev,
						sector_index,
						totle_nsect,
						buf,
						rw_flag,
						totle_nsect);
			spin_lock_irq(rq->queue_lock);
			if (buf == mtd_read_temp_buffer) {
				char *p = buf;

				rq_for_each_segment(bvec, req, rq_iter) {
					memcpy(page_address(bvec.bv_page) +
					       bvec.bv_offset,
					       p,
					       bvec.bv_len);
					p += bvec.bv_len;
				}
			}
		} else {
			rq_for_each_segment(bvec, req, rq_iter) {
				if ((page_address(bvec.bv_page)
					+ bvec.bv_offset)
					== (buf + rq_len)) {
					rq_len += bvec.bv_len;
				} else {
					if (rq_len) {
						spin_unlock_irq(rq->queue_lock);
						res = nand_dev_transfer(dev,
								sector_index,
								rq_len >> 9,
								buf,
								rw_flag,
								totle_nsect);
						spin_lock_irq(rq->queue_lock);
					}
					sector_index += rq_len >> 9;
					buf = (page_address(bvec.bv_page) +
						bvec.bv_offset);
					rq_len = bvec.bv_len;
				}
			}
			if (rq_len) {
				spin_unlock_irq(rq->queue_lock);
				res = nand_dev_transfer(dev,
							sector_index,
							rq_len >> 9,
							buf,
							rw_flag,
							totle_nsect);
				spin_lock_irq(rq->queue_lock);
			}
		}
		__blk_end_request_all(req, res);
		req = NULL;
	}
	pr_info("nand th quited\n");
	nandr->nand_th_quited = 1;
	if (req)
		__blk_end_request_all(req, -EIO);
	rk_nand_schedule_enable_config(0);
	while ((req = blk_fetch_request(rq)) != NULL)
		__blk_end_request_all(req, -ENODEV);
	spin_unlock_irq(rq->queue_lock);
	complete_and_exit(&nandr->thread_exit, 0);
	return 0;
}

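/*
 * request_fn for the queue: the real work happens in nand_blktrans_thread(),
 * so this only flags pending work and wakes the thread (or fails all
 * requests with -ENODEV once the thread has exited).
 */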
static void nand_blk_request(struct request_queue *rq)
{
	struct nand_blk_ops *nandr = rq->queuedata;
	struct request *req = NULL;

	if (nandr->nand_th_quited) {
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
		return;
	}
	rk_ftl_gc_do = 1;
	wake_up(&nandr->thread_wq);
}

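/*
 * Parse one partition entry of the mtdparts-style string.  The grammar
 * handled here is roughly
 *
 *   <size>[@<offset>](<name>)[ro|wo]
 *
 * where <size> and <offset> go through memparse() (so K/M/G suffixes work
 * and a leading '-' means "use the remaining capacity"), and entries are
 * separated by ','.  As used elsewhere in this file, the values are treated
 * as 512-byte sectors.  The function recurses for the next entry until
 * MAX_PART_COUNT entries have been filled.
 */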
static int rknand_get_part(char *parts,
			   struct nand_part *this_part,
			   int *part_index)
{
	char delim;
	unsigned int mask_flags;
	unsigned long long size, offset = ULLONG_MAX;
	char name[40] = "\0";

	if (*parts == '-') {
		size = ULLONG_MAX;
		parts++;
	} else {
		size = memparse(parts, &parts);
	}

	if (*parts == '@') {
		parts++;
		offset = memparse(parts, &parts);
	}

	mask_flags = 0;
	delim = 0;

	if (*parts == '(')
		delim = ')';

	if (delim) {
		char *p;

		p = strchr(parts + 1, delim);
		if (p == 0)
			return 0;
		strncpy(name, parts + 1, p - (parts + 1));
		parts = p + 1;
	}

	if (strncmp(parts, "ro", 2) == 0) {
		mask_flags = PART_READONLY;
		parts += 2;
	}

	if (strncmp(parts, "wo", 2) == 0) {
		mask_flags = PART_WRITEONLY;
		parts += 2;
	}

	this_part->size = (unsigned long)size;
	this_part->offset = (unsigned long)offset;
	this_part->type = mask_flags;
	sprintf(this_part->name, "%s", name);

	if ((++(*part_index) < MAX_PART_COUNT) && (*parts == ','))
		rknand_get_part(++parts, this_part + 1, part_index);

	return 1;
}

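/*
 * Build the partition table from the kernel command line.  Only the
 * "rk29xxnand:" device prefix is recognised, e.g. (illustrative values):
 *
 *   mtdparts=rk29xxnand:0x2000@0x2000(uboot),0x8000@0x8000(boot),-@0x10000(system)
 *
 * The size of the last partition, and of any partition that would run past
 * the FTL capacity, is clamped to the capacity reported by the FTL.
 */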
static int nand_prase_cmdline_part(struct nand_part *pdisk_part)
{
	char *pbuf;
	int part_num = 0, i;
	unsigned int cap_size = rk_ftl_get_capacity();
	char *cmdline;

	cmdline = strstr(saved_command_line, "mtdparts=");
	if (!cmdline)
		return 0;
	cmdline += strlen("mtdparts=");
	if (!memcmp(cmdline, "rk29xxnand:", strlen("rk29xxnand:"))) {
		pbuf = cmdline + strlen("rk29xxnand:");
		rknand_get_part(pbuf, pdisk_part, &part_num);
		if (part_num)
			pdisk_part[part_num - 1].size = cap_size -
				pdisk_part[part_num - 1].offset;

		for (i = 0; i < part_num; i++) {
			if (pdisk_part[i].size + pdisk_part[i].offset
				> cap_size) {
				pdisk_part[i].size = cap_size -
					pdisk_part[i].offset;
				pr_err("partition config error....\n");
				if (pdisk_part[i].size)
					return i;
				else
					return (i + 1);
			}
		}
		return part_num;
	}
	return 0;
}

static int rknand_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rknand_release(struct gendisk *disk, fmode_t mode)
{
}

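/*
 * Block-device ioctls to toggle access restrictions on a partition at run
 * time.  A hypothetical user-space sketch (device node name is an
 * assumption, error handling omitted):
 *
 *   #include <fcntl.h>
 *   #include <sys/ioctl.h>
 *
 *   #define DISABLE_WRITE _IO('V', 0)
 *
 *   int fd = open("/dev/rknand_misc", O_RDONLY);
 *   ioctl(fd, DISABLE_WRITE);    // writes now fail with -EIO
 *   close(fd);
 */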
#define DISABLE_WRITE _IO('V', 0)
#define ENABLE_WRITE _IO('V', 1)
#define DISABLE_READ _IO('V', 2)
#define ENABLE_READ _IO('V', 3)
static int rknand_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd,
			unsigned long arg)
{
	struct nand_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;

	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;

	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;

	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}

const struct block_device_operations nand_blktrans_ops = {
	.owner = THIS_MODULE,
	.open = rknand_open,
	.release = rknand_release,
	.ioctl = rknand_ioctl,
};

static struct nand_blk_ops mytr = {
	.name = "rknand",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};

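/*
 * Create one gendisk for a partition: allocate the nand_blk_dev, hook it up
 * to the shared request queue, apply the access flags encoded in the
 * partition type and name the node "<ops>_<partname>" (or "<ops><index>"
 * when the partition has no name).
 */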
static int nand_add_dev(struct nand_blk_ops *nandr, struct nand_part *part)
{
	struct nand_blk_dev *dev;
	struct gendisk *gd;

	if (part->size == 0)
		return -1;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	gd = alloc_disk(1 << nandr->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}

	dev->nandr = nandr;
	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = nandr->last_dev_index;
	list_add_tail(&dev->list, &nandr->devs);
	nandr->last_dev_index++;

	gd->major = nandr->major;
	gd->first_minor = (dev->devnum) << nandr->minorbits;
	gd->fops = &nand_blktrans_ops;

	if (part->name[0])
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s_%s",
			 nandr->name,
			 part->name);
	else
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s%d",
			 nandr->name,
			 dev->devnum);

	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = nandr->rq;
	gd->queue->bypass_depth = 1;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;

	if (part->type == PART_READONLY)
		dev->readonly = 1;

	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}

static int nand_remove_dev(struct nand_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);
	return 0;
}

int nand_blk_add_whole_disk(void)
{
	struct nand_part part;

	part.offset = 0;
	part.size = rk_ftl_get_capacity();
	part.type = 0;
	memcpy(part.name, "rknand", sizeof("rknand"));
	nand_add_dev(&mytr, &part);
	return 0;
}

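/*
 * Bring the block layer side up: register the major, create the request
 * queue (capped at MTD_RW_SECTORS per request, with DISCARD enabled), start
 * the worker thread, then parse the command-line partitions and create one
 * disk per entry.
 */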
static int nand_blk_register(struct nand_blk_ops *nandr)
{
	struct task_struct *tsk;
	int i, ret;
	int offset;

	rk_nand_schedule_enable_config(1);
	nandr->quit = 0;
	nandr->nand_th_quited = 0;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
				       GFP_KERNEL | GFP_DMA);

	ret = register_blkdev(nandr->major, nandr->name);
	if (ret)
		return -1;

	spin_lock_init(&nandr->queue_lock);
	init_completion(&nandr->thread_exit);
	init_waitqueue_head(&nandr->thread_wq);
	rknand_device_lock_init();

	nandr->rq = blk_init_queue(nand_blk_request, &nandr->queue_lock);
	if (!nandr->rq) {
		unregister_blkdev(nandr->major, nandr->name);
		return -1;
	}

	blk_queue_max_hw_sectors(nandr->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(nandr->rq, MTD_RW_SECTORS);

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nandr->rq);
	blk_queue_max_discard_sectors(nandr->rq, UINT_MAX >> 9);

	nandr->rq->queuedata = nandr;
	INIT_LIST_HEAD(&nandr->devs);
	tsk = kthread_run(nand_blktrans_thread, (void *)nandr, "rknand");
	if (IS_ERR(tsk)) {
		blk_cleanup_queue(nandr->rq);
		unregister_blkdev(nandr->major, nandr->name);
		return -1;
	}

	g_max_part_num = nand_prase_cmdline_part(disk_array);
	offset = 0;
	nandr->last_dev_index = 0;
	for (i = 0; i < g_max_part_num; i++) {
		pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
			disk_array[i].name,
			(u64)disk_array[i].offset * 512,
			(u64)(disk_array[i].offset + disk_array[i].size) * 512,
			(u64)disk_array[i].size / 2048);
		nand_add_dev(nandr, &disk_array[i]);
	}

	rknand_create_procfs();
	rk_ftl_storage_sys_init();

	return 0;
}

static void nand_blk_unregister(struct nand_blk_ops *nandr)
{
	struct list_head *this, *next;

	nandr->quit = 1;
	wake_up(&nandr->thread_wq);
	wait_for_completion(&nandr->thread_exit);
	list_for_each_safe(this, next, &nandr->devs) {
		struct nand_blk_dev *dev
			= list_entry(this, struct nand_blk_dev, list);

		nand_remove_dev(dev);
	}
	blk_cleanup_queue(nandr->rq);
	unregister_blkdev(nandr->major, nandr->name);
}

void rknand_dev_flush(void)
{
	rknand_device_lock();
	rk_ftl_cache_write_back();
	rknand_device_unlock();
	pr_info("Nand flash flush ok!\n");
}

int __init rknand_dev_init(void)
{
	int ret;
	void __iomem *nandc0;
	void __iomem *nandc1;

	rknand_get_reg_addr((unsigned long *)&nandc0, (unsigned long *)&nandc1);
	if (nandc0 == 0)
		return -1;

	ret = rk_ftl_init();
	if (ret) {
		pr_err("rk_ftl_init fail\n");
		return -1;
	}

	ret = nand_blk_register(&mytr);
	if (ret) {
		pr_err("nand_blk_register fail\n");
		return -1;
	}

	rk_nand_dev_initialised = 1;
	return ret;
}

int rknand_dev_exit(void)
{
	if (rk_nand_dev_initialised) {
		rk_nand_dev_initialised = 0;
		if (rknand_device_trylock()) {
			rk_ftl_cache_write_back();
			rknand_device_unlock();
		}
		nand_blk_unregister(&mytr);
		rk_ftl_de_init();
		pr_info("nand_blk_dev_exit:OK\n");
	}
	return 0;
}

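/*
 * Suspend/resume/shutdown hooks exported to the rest of the rk_nand driver.
 * Suspend takes the global ops mutex and keeps it held so that no FTL call
 * can run while the NAND controller is down; resume releases it again.
 * Shutdown stops the worker thread and de-initialises the FTL, which is
 * expected to write back its cache before power-off or reboot.
 */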
void rknand_dev_suspend(void)
{
	pr_info("rk_nand_suspend\n");
	rk_nand_schedule_enable_config(0);
	rknand_device_lock();
	rk_nand_suspend();
}

void rknand_dev_resume(void)
{
	pr_info("rk_nand_resume\n");
	rk_nand_resume();
	rknand_device_unlock();
	rk_nand_schedule_enable_config(1);
}

void rknand_dev_shutdown(void)
{
	pr_info("rknand_shutdown...\n");
	if (mytr.quit == 0) {
		mytr.quit = 1;
		wake_up(&mytr.thread_wq);
		wait_for_completion(&mytr.thread_exit);
		rk_ftl_de_init();
	}
	pr_info("rknand_shutdown:OK\n");
}