/*
 * Copyright (c) 2016, Fuzhou Rockchip Electronics Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/init.h>
#include <linux/semaphore.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/version.h>

#include "rk_nand_blk.h"
#include "rk_ftl_api.h"
static struct nand_part disk_array[MAX_PART_COUNT];
static int g_max_part_num = 4;

#define PART_READONLY	0x85
#define PART_WRITEONLY	0x86
#define PART_NO_ACCESS	0x87

/* Cumulative I/O statistics, counted in 512-byte sectors. */
static unsigned long total_read_data;
static unsigned long total_write_data;
static unsigned long total_read_count;
static unsigned long total_write_count;

static int rk_nand_dev_initialised;

static char *mtd_read_temp_buffer;
#define MTD_RW_SECTORS (512)
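
/*
 * mtd_read_temp_buffer serves as a bounce buffer for read requests
 * whose bio segments are not virtually contiguous: the FTL reads into
 * it in one pass and the data is then copied out segment by segment.
 * It is MTD_RW_SECTORS * 512 bytes (256 KiB), which also bounds the
 * request queue via blk_queue_max_hw_sectors() during registration.
 */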
static int rknand_proc_show(struct seq_file *m, void *v)
{
	m->count = rknand_proc_ftlread(m->buf);
	/* Counters are 512-byte sectors; >> 1 converts to KiB. */
	seq_printf(m, "Total Read %ld KB\n", total_read_data >> 1);
	seq_printf(m, "Total Write %ld KB\n", total_write_data >> 1);
	seq_printf(m, "total_write_count %ld\n", total_write_count);
	seq_printf(m, "total_read_count %ld\n", total_read_count);
	return 0;
}
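
/*
 * The "mtd" proc entry below mimics the /proc/mtd layout so existing
 * tools can parse it.  Illustrative output for a 0x2000-sector "boot"
 * partition (size is printed in bytes, erase size is fixed at 0x4000):
 *
 *	dev:    size   erasesize  name
 *	rknand0: 00400000 00004000 "boot"
 */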
static int rknand_mtd_proc_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "%s", "dev:    size   erasesize  name\n");
	for (i = 0; i < g_max_part_num; i++) {
		seq_printf(m, "rknand%d: %8.8llx %8.8x \"%s\"\n", i,
			   (unsigned long long)disk_array[i].size * 512,
			   32 * 0x200, disk_array[i].name);
	}
	return 0;
}
static int rknand_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_proc_show, PDE_DATA(inode));
}

static int rknand_mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, rknand_mtd_proc_show, PDE_DATA(inode));
}

static const struct file_operations rknand_proc_fops = {
	.owner = THIS_MODULE,
	.open = rknand_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static const struct file_operations rknand_mtd_proc_fops = {
	.owner = THIS_MODULE,
	.open = rknand_mtd_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static int rknand_create_procfs(void)
{
	struct proc_dir_entry *ent;

	/* Modes are octal; both entries expose read-only statistics. */
	ent = proc_create_data("rknand", 0444, NULL, &rknand_proc_fops,
			       (void *)0);
	if (!ent)
		return -ENOMEM;

	ent = proc_create_data("mtd", 0444, NULL, &rknand_mtd_proc_fops,
			       (void *)0);
	if (!ent)
		return -ENOMEM;

	return 0;
}
static struct mutex g_rk_nand_ops_mutex;

static void rknand_device_lock_init(void)
{
	mutex_init(&g_rk_nand_ops_mutex);
}

void rknand_device_lock(void)
{
	mutex_lock(&g_rk_nand_ops_mutex);
}

int rknand_device_trylock(void)
{
	return mutex_trylock(&g_rk_nand_ops_mutex);
}

void rknand_device_unlock(void)
{
	mutex_unlock(&g_rk_nand_ops_mutex);
}
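
/*
 * g_rk_nand_ops_mutex serializes every entry into the FTL: request
 * handling, background garbage collection, cache write-back and the
 * suspend/resume paths.  rknand_device_trylock() lets the worker
 * thread skip a GC pass instead of blocking behind live I/O.
 */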
static int nand_dev_transfer(struct nand_blk_dev *dev,
			     unsigned long start,
			     unsigned long nsector,
			     char *buf,
			     int cmd)
{
	int ret;

	if (dev->disable_access ||
	    ((cmd == WRITE) && dev->readonly) ||
	    ((cmd == READ) && dev->writeonly)) {
		return -EIO;
	}

	/* Translate the partition-relative sector to an absolute one. */
	start += dev->off_size;
	rknand_device_lock();

	switch (cmd) {
	case READ:
		total_read_data += nsector;
		total_read_count++;
		ret = FtlRead(0, start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;
	case WRITE:
		total_write_data += nsector;
		total_write_count++;
		ret = FtlWrite(0, start, nsector, buf);
		if (ret)
			ret = -EIO;
		break;
	default:
		ret = -EIO;
		break;
	}

	rknand_device_unlock();
	return ret;
}
void rknand_queue_cond_resched(void)
{
	cond_resched();
}

static DECLARE_WAIT_QUEUE_HEAD(rknand_thread_wait);
static void rk_ftl_gc_timeout_hack(unsigned long data);
static DEFINE_TIMER(rk_ftl_gc_timeout, rk_ftl_gc_timeout_hack, 0, 0);
static unsigned long rk_ftl_gc_jiffies;
static unsigned long rk_ftl_gc_do;

static void rk_ftl_gc_timeout_hack(unsigned long data)
{
	del_timer(&rk_ftl_gc_timeout);
	rk_ftl_gc_do++;
	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies * rk_ftl_gc_do;
	add_timer(&rk_ftl_gc_timeout);
}
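
/*
 * Each expiry bumps rk_ftl_gc_do, which both widens the next timer
 * interval and satisfies the wait_event_timeout() in the worker
 * thread below, nudging it to run a garbage-collection pass even
 * when no block requests arrive.
 */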
static int req_check_buffer_align(struct request *req, char **pbuf)
{
	int nr_vec = 0;
	struct bio_vec bv;
	struct req_iterator iter;
	char *buffer;
	char *firstbuffer = NULL;
	char *nextbuffer = NULL;
	unsigned long block, nsect;

	block = blk_rq_pos(req);
	nsect = blk_rq_cur_bytes(req) >> 9;
	rq_for_each_segment(bv, req, iter) {
		buffer = page_address(bv.bv_page) + bv.bv_offset;
		if (!firstbuffer)
			firstbuffer = buffer;
		nr_vec++;
		/* A gap between segments means the request is not
		 * virtually contiguous; fall back to the bounce buffer.
		 */
		if (nextbuffer != 0 && nextbuffer != buffer)
			return 0;
		nextbuffer = buffer + bv.bv_len;
	}
	*pbuf = firstbuffer;
	return 1;
}
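
/*
 * Read-path overview: if every bio segment of a READ request sits at
 * consecutive kernel virtual addresses, req_check_buffer_align()
 * reports 1 and the FTL can read straight into the caller's pages in
 * a single FtlRead().  Otherwise the thread below reads into
 * mtd_read_temp_buffer first and scatters the data with memcpy().
 */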
static int nand_blktrans_thread(void *arg)
{
	struct nand_blk_ops *nandr = arg;
	struct request_queue *rq = nandr->rq;
	struct request *req = NULL;
	int ftl_gc_status = 0;
	char *buf = NULL;
	struct req_iterator rq_iter;
	struct bio_vec bvec;
	unsigned long long sector_index = ULLONG_MAX;
	unsigned long total_nsect;
	unsigned long rq_len = 0;
	int rw_flag = 0;
	int req_empty_times = 0;
	int res = 0;

	spin_lock_irq(rq->queue_lock);
	rk_ftl_gc_jiffies = HZ * 5;
	rk_ftl_gc_do = 0;
	rk_ftl_gc_timeout.expires = jiffies + rk_ftl_gc_jiffies;
	add_timer(&rk_ftl_gc_timeout);

	while (!nandr->quit) {
		struct nand_blk_dev *dev;
		DECLARE_WAITQUEUE(wait, current);

		if (!req)
			req = blk_fetch_request(rq);
		if (!req) {
			/* Queue empty: run background GC, then sleep. */
			add_wait_queue(&nandr->thread_wq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(rq->queue_lock);
			if (rknand_device_trylock()) {
				ftl_gc_status = rk_ftl_garbage_collect(1, 0);
				rknand_device_unlock();
				rk_ftl_gc_jiffies = HZ / 50;
				if (ftl_gc_status == 0) {
					rk_ftl_gc_jiffies = 1 * HZ;
				} else if (ftl_gc_status < 8) {
					spin_lock_irq(rq->queue_lock);
					remove_wait_queue(&nandr->thread_wq,
							  &wait);
					continue;
				}
			} else {
				rk_ftl_gc_jiffies = HZ / 50;
			}
			req_empty_times++;
			if (req_empty_times < 10)
				rk_ftl_gc_jiffies = HZ / 50;
			/* 100ms cache write back */
			if (req_empty_times >= 5 && req_empty_times < 7) {
				rknand_device_lock();
				rk_ftl_cache_write_back();
				rknand_device_unlock();
			}
			wait_event_timeout(nandr->thread_wq,
					   rk_ftl_gc_do || nandr->quit,
					   rk_ftl_gc_jiffies);
			rk_ftl_gc_do = 0;
			spin_lock_irq(rq->queue_lock);
			remove_wait_queue(&nandr->thread_wq, &wait);
			continue;
		}

		rk_ftl_gc_jiffies = 1 * HZ;
		req_empty_times = 0;

		dev = req->rq_disk->private_data;
		total_nsect = (req->__data_len) >> 9;
		sector_index = blk_rq_pos(req);
		rq_len = 0;
		buf = NULL;
		res = 0;

		if (req->cmd_flags & REQ_DISCARD) {
			spin_unlock_irq(rq->queue_lock);
			rknand_device_lock();
			if (FtlDiscard(blk_rq_pos(req) +
				       dev->off_size, total_nsect))
				res = -EIO;
			rknand_device_unlock();
			spin_lock_irq(rq->queue_lock);
			if (!__blk_end_request_cur(req, res))
				req = NULL;
			continue;
		} else if (req->cmd_flags & REQ_FLUSH) {
			spin_unlock_irq(rq->queue_lock);
			rknand_device_lock();
			rk_ftl_cache_write_back();
			rknand_device_unlock();
			spin_lock_irq(rq->queue_lock);
			if (!__blk_end_request_cur(req, res))
				req = NULL;
			continue;
		}

		rw_flag = req->cmd_flags & REQ_WRITE;
		if (rw_flag == READ && mtd_read_temp_buffer) {
			buf = mtd_read_temp_buffer;
			req_check_buffer_align(req, &buf);
			spin_unlock_irq(rq->queue_lock);
			res = nand_dev_transfer(dev,
						sector_index,
						total_nsect,
						buf,
						rw_flag);
			spin_lock_irq(rq->queue_lock);
			if (buf == mtd_read_temp_buffer) {
				/* Bounce buffer used: scatter the data
				 * back into the request's segments.
				 */
				char *p = buf;

				rq_for_each_segment(bvec, req, rq_iter) {
					memcpy(page_address(bvec.bv_page) +
					       bvec.bv_offset,
					       p, bvec.bv_len);
					p += bvec.bv_len;
				}
			}
		} else {
			/* Coalesce virtually contiguous segments and
			 * hand each run to the FTL in one transfer.
			 */
			rq_for_each_segment(bvec, req, rq_iter) {
				if ((page_address(bvec.bv_page)
				    + bvec.bv_offset)
				    == (buf + rq_len)) {
					rq_len += bvec.bv_len;
				} else {
					if (rq_len) {
						spin_unlock_irq(rq->queue_lock);
						res = nand_dev_transfer(dev,
								sector_index,
								rq_len >> 9,
								buf,
								rw_flag);
						spin_lock_irq(rq->queue_lock);
					}
					sector_index += rq_len >> 9;
					buf = (page_address(bvec.bv_page) +
					       bvec.bv_offset);
					rq_len = bvec.bv_len;
				}
			}
			if (rq_len) {
				spin_unlock_irq(rq->queue_lock);
				res = nand_dev_transfer(dev,
							sector_index,
							rq_len >> 9,
							buf,
							rw_flag);
				spin_lock_irq(rq->queue_lock);
			}
		}
		__blk_end_request_all(req, res);
		req = NULL;
	}

	pr_info("nand th quited\n");
	nandr->nand_th_quited = 1;
	if (req)
		__blk_end_request_all(req, -EIO);
	rk_nand_schedule_enable_config(0);
	while ((req = blk_fetch_request(rq)) != NULL)
		__blk_end_request_all(req, -ENODEV);
	spin_unlock_irq(rq->queue_lock);
	complete_and_exit(&nandr->thread_exit, 0);
}
static void nand_blk_request(struct request_queue *rq)
{
	struct nand_blk_ops *nandr = rq->queuedata;
	struct request *req = NULL;

	if (nandr->nand_th_quited) {
		while ((req = blk_fetch_request(rq)) != NULL)
			__blk_end_request_all(req, -ENODEV);
		return;
	}
	/* Actual work happens in the worker thread; just wake it. */
	wake_up(&nandr->thread_wq);
}
static int rknand_get_part(char *parts,
			   struct nand_part *this_part,
			   int *part_index)
{
	char delim = 0;
	unsigned int mask_flags = 0;
	unsigned long long size, offset = ULLONG_MAX;
	char name[40] = "\0";
	char *p;

	/* Partition syntax: <size>@<offset>(<name>)[ro|wo] */
	if (*parts == '-') {
		/* '-' means "fill to the end of the device". */
		size = ULLONG_MAX;
		parts++;
	} else {
		size = memparse(parts, &parts);
	}

	if (*parts != '@')
		return 1;
	parts++;
	offset = memparse(parts, &parts);

	if (*parts == '(')
		delim = ')';

	if (delim) {
		p = strchr(parts + 1, delim);
		if (!p)
			return 1;
		strncpy(name, parts + 1,
			min_t(size_t, p - (parts + 1), sizeof(name) - 1));
		parts = p + 1;
	}

	if (strncmp(parts, "ro", 2) == 0) {
		mask_flags = PART_READONLY;
		parts += 2;
	}
	if (strncmp(parts, "wo", 2) == 0) {
		mask_flags = PART_WRITEONLY;
		parts += 2;
	}

	this_part->size = (unsigned long)size;
	this_part->offset = (unsigned long)offset;
	this_part->type = mask_flags;
	sprintf(this_part->name, "%s", name);

	if ((++(*part_index) < MAX_PART_COUNT) && (*parts == ','))
		rknand_get_part(++parts, this_part + 1, part_index);

	return 0;
}
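
/*
 * Example command line accepted by this parser (values are purely
 * illustrative, not a recommended layout):
 *
 *	mtdparts=rk29xxnand:0x2000@0x2000(boot),0x8000@0x4000(system)ro,-@0xc000(userdata)
 *
 * Sizes and offsets are 512-byte sectors parsed by memparse(); '-'
 * lets the final partition take the remaining capacity, and a
 * trailing "ro"/"wo" makes a partition read-only or write-only.
 */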
static int nand_parse_cmdline_part(struct nand_part *pdisk_part)
{
	char *pbuf;
	char *cmdline;
	int part_num = 0, i;
	unsigned int cap_size = rk_ftl_get_capacity();

	cmdline = strstr(saved_command_line, "mtdparts=");
	if (!cmdline)
		return 0;
	cmdline += strlen("mtdparts=");
	if (!memcmp(cmdline, "rk29xxnand:", strlen("rk29xxnand:"))) {
		pbuf = cmdline + strlen("rk29xxnand:");
		rknand_get_part(pbuf, pdisk_part, &part_num);
		if (part_num == 0)
			return 0;

		/* The last partition always extends to the device end. */
		pdisk_part[part_num - 1].size = cap_size -
			pdisk_part[part_num - 1].offset;

		for (i = 0; i < part_num; i++) {
			if (pdisk_part[i].size + pdisk_part[i].offset
			    > cap_size) {
				pdisk_part[i].size = cap_size -
						     pdisk_part[i].offset;
				pr_err("partition config error....\n");
				if (pdisk_part[i].size)
					return i + 1;
				return i;
			}
		}
		return part_num;
	}
	return 0;
}
static int rknand_open(struct block_device *bdev, fmode_t mode)
{
	return 0;
}

static void rknand_release(struct gendisk *disk, fmode_t mode)
{
}

#define DISABLE_WRITE	_IO('V', 0)
#define ENABLE_WRITE	_IO('V', 1)
#define DISABLE_READ	_IO('V', 2)
#define ENABLE_READ	_IO('V', 3)

static int rknand_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct nand_blk_dev *dev = bdev->bd_disk->private_data;

	switch (cmd) {
	case ENABLE_WRITE:
		dev->disable_access = 0;
		dev->readonly = 0;
		set_disk_ro(dev->blkcore_priv, 0);
		return 0;
	case DISABLE_WRITE:
		dev->readonly = 1;
		set_disk_ro(dev->blkcore_priv, 1);
		return 0;
	case ENABLE_READ:
		dev->disable_access = 0;
		dev->writeonly = 0;
		return 0;
	case DISABLE_READ:
		dev->writeonly = 1;
		return 0;
	default:
		return -ENOTTY;
	}
}
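
/*
 * Illustrative user-space usage of the ioctls above (the device path
 * is hypothetical and depends on the partition layout):
 *
 *	int fd = open("/dev/rknand0", O_RDONLY);
 *
 *	ioctl(fd, DISABLE_WRITE);	// disk becomes read-only
 *	ioctl(fd, ENABLE_WRITE);	// write access restored
 */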
const struct block_device_operations nand_blktrans_ops = {
	.owner = THIS_MODULE,
	.open = rknand_open,
	.release = rknand_release,
	.ioctl = rknand_ioctl,
};

static struct nand_blk_ops mytr = {
	.name = "rknand",
	.major = 31,
	.minorbits = 0,
	.owner = THIS_MODULE,
};
static int nand_add_dev(struct nand_blk_ops *nandr, struct nand_part *part)
{
	struct nand_blk_dev *dev;
	struct gendisk *gd;

	if (part->size == 0)
		return -EINVAL;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	gd = alloc_disk(1 << nandr->minorbits);
	if (!gd) {
		kfree(dev);
		return -ENOMEM;
	}

	dev->size = part->size;
	dev->off_size = part->offset;
	dev->devnum = nandr->last_dev_index;
	list_add_tail(&dev->list, &nandr->devs);
	nandr->last_dev_index++;

	gd->major = nandr->major;
	gd->first_minor = (dev->devnum) << nandr->minorbits;
	gd->fops = &nand_blktrans_ops;

	if (part->name[0])
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s", part->name);
	else
		snprintf(gd->disk_name,
			 sizeof(gd->disk_name),
			 "%s%d", nandr->name, dev->devnum);

	/* set_capacity() takes the size in 512-byte sectors. */
	set_capacity(gd, dev->size);

	gd->private_data = dev;
	dev->blkcore_priv = gd;
	gd->queue = nandr->rq;
	gd->queue->bypass_depth = 1;

	if (part->type == PART_NO_ACCESS)
		dev->disable_access = 1;
	if (part->type == PART_READONLY)
		dev->readonly = 1;
	if (part->type == PART_WRITEONLY)
		dev->writeonly = 1;

	if (dev->readonly)
		set_disk_ro(gd, 1);

	add_disk(gd);

	return 0;
}
static int nand_remove_dev(struct nand_blk_dev *dev)
{
	struct gendisk *gd;

	gd = dev->blkcore_priv;
	list_del(&dev->list);
	gd->queue = NULL;
	del_gendisk(gd);
	put_disk(gd);
	kfree(dev);
	return 0;
}

int nand_blk_add_whole_disk(void)
{
	struct nand_part part;

	part.offset = 0;
	part.size = rk_ftl_get_capacity();
	part.type = 0;
	memcpy(part.name, "rknand", sizeof("rknand"));
	nand_add_dev(&mytr, &part);
	return 0;
}
static int nand_blk_register(struct nand_blk_ops *nandr)
{
	struct task_struct *tsk;
	int i, ret;

	rk_nand_schedule_enable_config(1);
	nandr->quit = 0;
	nandr->nand_th_quited = 0;

	mtd_read_temp_buffer = kmalloc(MTD_RW_SECTORS * 512,
				       GFP_KERNEL | GFP_DMA);
	if (!mtd_read_temp_buffer)
		return -ENOMEM;

	ret = register_blkdev(nandr->major, nandr->name);
	if (ret)
		return ret;

	spin_lock_init(&nandr->queue_lock);
	init_completion(&nandr->thread_exit);
	init_waitqueue_head(&nandr->thread_wq);
	rknand_device_lock_init();

	nandr->rq = blk_init_queue(nand_blk_request, &nandr->queue_lock);
	if (!nandr->rq) {
		unregister_blkdev(nandr->major, nandr->name);
		return -ENOMEM;
	}

	blk_queue_max_hw_sectors(nandr->rq, MTD_RW_SECTORS);
	blk_queue_max_segments(nandr->rq, MTD_RW_SECTORS);

	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nandr->rq);
	blk_queue_max_discard_sectors(nandr->rq, UINT_MAX >> 9);

	nandr->rq->queuedata = nandr;
	INIT_LIST_HEAD(&nandr->devs);
	tsk = kthread_run(nand_blktrans_thread, (void *)nandr, "rknand");
	if (IS_ERR(tsk)) {
		blk_cleanup_queue(nandr->rq);
		unregister_blkdev(nandr->major, nandr->name);
		return PTR_ERR(tsk);
	}

	g_max_part_num = nand_parse_cmdline_part(disk_array);
	nandr->last_dev_index = 0;
	for (i = 0; i < g_max_part_num; i++) {
		/* Offsets/sizes are sectors; divide by 2048 for MB. */
		pr_info("%10s: 0x%09llx -- 0x%09llx (%llu MB)\n",
			disk_array[i].name,
			(u64)disk_array[i].offset * 512,
			(u64)(disk_array[i].offset + disk_array[i].size) * 512,
			(u64)disk_array[i].size / 2048);
		nand_add_dev(nandr, &disk_array[i]);
	}

	rknand_create_procfs();
	rk_ftl_storage_sys_init();
	return 0;
}
static void nand_blk_unregister(struct nand_blk_ops *nandr)
{
	struct list_head *this, *next;

	nandr->quit = 1;
	wake_up(&nandr->thread_wq);
	wait_for_completion(&nandr->thread_exit);
	list_for_each_safe(this, next, &nandr->devs) {
		struct nand_blk_dev *dev
			= list_entry(this, struct nand_blk_dev, list);

		nand_remove_dev(dev);
	}
	blk_cleanup_queue(nandr->rq);
	unregister_blkdev(nandr->major, nandr->name);
}
void rknand_dev_flush(void)
{
	rknand_device_lock();
	rk_ftl_cache_write_back();
	rknand_device_unlock();
	pr_info("Nand flash flush ok!\n");
}
int __init rknand_dev_init(void)
{
	int ret;
	void __iomem *nandc0;
	void __iomem *nandc1;

	rknand_get_reg_addr((unsigned long *)&nandc0, (unsigned long *)&nandc1);
	if (!nandc0)
		return -ENXIO;

	ret = rk_ftl_init();
	if (ret) {
		pr_err("rk_ftl_init fail\n");
		return -ENXIO;
	}

	ret = nand_blk_register(&mytr);
	if (ret) {
		pr_err("nand_blk_register fail\n");
		return -ENXIO;
	}

	rk_nand_dev_initialised = 1;
	return ret;
}
int rknand_dev_exit(void)
{
	if (rk_nand_dev_initialised) {
		rk_nand_dev_initialised = 0;
		if (rknand_device_trylock()) {
			rk_ftl_cache_write_back();
			rknand_device_unlock();
		}
		nand_blk_unregister(&mytr);
		pr_info("nand_blk_dev_exit:OK\n");
	}
	return 0;
}
void rknand_dev_suspend(void)
{
	pr_info("rk_nand_suspend\n");
	rk_nand_schedule_enable_config(0);
	/* Hold the ops mutex across suspend; resume releases it. */
	rknand_device_lock();
}

void rknand_dev_resume(void)
{
	pr_info("rk_nand_resume\n");
	rknand_device_unlock();
	rk_nand_schedule_enable_config(1);
}
void rknand_dev_shutdown(void)
{
	pr_info("rknand_shutdown...\n");
	if (mytr.quit == 0) {
		mytr.quit = 1;
		wake_up(&mytr.thread_wq);
		wait_for_completion(&mytr.thread_exit);
	}
	pr_info("rknand_shutdown:OK\n");
}