2 * drivers/mtd/devices/tegra_nand.c
4 * Copyright (C) 2010 Google, Inc.
5 * Author: Dima Zavin <dima@android.com>
6 * Colin Cross <ccross@android.com>
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * Derived from: drivers/mtd/nand/nand_base.c
18 * drivers/mtd/nand/pxa3xx.c
21 * - Add support for 16bit bus width
24 #include <linux/delay.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/init.h>
27 #include <linux/interrupt.h>
29 #include <linux/module.h>
30 #include <linux/mutex.h>
31 #include <linux/mtd/nand.h>
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/partitions.h>
34 #include <linux/platform_device.h>
35 #include <linux/types.h>
36 #include <linux/clk.h>
37 #include <linux/slab.h>
39 #include <mach/nand.h>
41 #include "tegra_nand.h"
/* Driver identity strings used for registration and log messages. */
43 #define DRIVER_NAME "tegra_nand"
44 #define DRIVER_DESC "Nvidia Tegra NAND Flash Controller driver"
/* Max bytes per DMA transfer and size of the ECC error-vector buffer. */
46 #define MAX_DMA_SZ SZ_64K
47 #define ECC_BUF_SZ SZ_1K
49 /* FIXME: is this right?!
50 * NvRM code says it should be 128 bytes, but that seems awfully small
/* Debug switches: uncomment to enable verbose logging / pedantic BUG_ONs. */
53 /*#define TEGRA_NAND_DEBUG
54 #define TEGRA_NAND_DEBUG_PEDANTIC*/
56 #ifdef TEGRA_NAND_DEBUG
57 #define TEGRA_DBG(fmt, args...) \
58 do { pr_info(fmt, ##args); } while (0)
/* Compiles away to nothing when TEGRA_NAND_DEBUG is not defined. */
60 #define TEGRA_DBG(fmt, args...)
63 /* TODO: will vary with devices, move into appropriate device specific header */
/* Conservative controller timings used only during the initial ID scan. */
64 #define SCAN_TIMING_VAL 0x3f0bd214
65 #define SCAN_TIMING2_VAL 0xb
67 /* TODO: pull in the register defs (fields, masks, etc) from Nvidia files
68 * so we don't have to redefine them */
70 #ifdef CONFIG_MTD_PARTITIONS
/* Partition-table probe order: kernel command line first. */
71 static const char *part_probes[] = { "cmdlinepart", NULL, };
/* Per-chip geometry derived from READ ID; the shift/mask comments below
 * document how a linear mtd offset is decomposed by split_addr().
 * NOTE(review): field declarations are missing from this capture (embedded
 * line numbers jump) — only the explanatory comments survived. */
74 struct tegra_nand_chip {
80 /* addr >> chip_shift == chip number */
82 /* (addr >> page_shift) & page_mask == page number within chip */
85 /* column within page */
87 /* addr >> block_shift == block number (across the whole mtd dev, not
88 * just a single chip. */
/* Driver state: one instance per controller, embedding the mtd_info and
 * chip geometry plus DMA buffers, saved register images and completions.
 * NOTE(review): several field declarations are missing from this capture. */
94 struct tegra_nand_info {
95 struct tegra_nand_chip chip;
97 struct tegra_nand_platform *plat;
99 struct mtd_partition *parts;
101 /* synchronizes access to accessing the actual NAND controller */
/* DMA address of the one-page OOB bounce buffer (oob_dma_buf). */
106 dma_addr_t oob_dma_addr;
107 /* ecc error vector info (offset into page and data mask to apply */
110 /* ecc error status (page number, err_cnt) */
112 uint32_t num_ecc_errs;
113 uint32_t max_ecc_errs;
/* Shadow copies of COMMAND/DMA_MST_CTRL built by prep_* then written out. */
116 uint32_t command_reg;
118 uint32_t dmactrl_reg;
/* Signalled from the IRQ handler on command-done / DMA-done. */
120 struct completion cmd_complete;
121 struct completion dma_complete;
123 /* bad block bitmap: 1 == good, 0 == bad/unknown */
124 unsigned long *bb_bitmap;
/* Recover the enclosing tegra_nand_info from an embedded mtd_info pointer. */
128 #define MTD_TO_INFO(mtd) container_of((mtd), struct tegra_nand_info, mtd)
130 /* 64 byte oob block info for large page (== 2KB) device
132 * OOB flash layout for Tegra with Reed-Solomon 4 symbol correct ECC:
138 * Yaffs2 will use 16 tag bytes.
/* ECC placement for 2KB-page / 64-byte-OOB parts; the only layout this
 * driver supports (see the pagesize checks in tegra_nand_scan()).
 * NOTE(review): the eccbytes/oobfree tail of this initializer is missing
 * from this capture. */
141 static struct nand_ecclayout tegra_nand_oob_64 = {
144 4, 5, 6, 7, 8, 9, 10, 11, 12,
145 13, 14, 15, 16, 17, 18, 19, 20, 21,
146 22, 23, 24, 25, 26, 27, 28, 29, 30,
147 31, 32, 33, 34, 35, 36, 37, 38, 39,
/* Linear-search the global nand_flash_ids table for a device id byte.
 * Returns the matching entry or NULL if the id is unknown.
 * NOTE(review): the loop-advance statement (original line 163, presumably
 * dev++) is missing from this capture — confirm against the full source. */
157 static struct nand_flash_dev *
158 find_nand_flash_device(int dev_id)
160 struct nand_flash_dev *dev = &nand_flash_ids[0];
162 while (dev->name && dev->id != dev_id)
164 return dev->name ? dev : NULL;
/* Linear-search nand_manuf_ids for a manufacturer id byte; NULL if unknown.
 * NOTE(review): loop-advance statement (original line 173) missing from
 * this capture, as in find_nand_flash_device() above. */
167 static struct nand_manufacturers *
168 find_nand_flash_vendor(int vendor_id)
170 struct nand_manufacturers *vendor = &nand_manuf_ids[0];
172 while (vendor->id && vendor->id != vendor_id)
174 return vendor->id ? vendor : NULL;
/* Pairs a register address macro with its stringified name for reg dumps. */
177 #define REG_NAME(name) { name, #name }
/* Table consumed by dump_nand_regs(); terminated by a NULL name (the
 * terminator entry and the table's declaration line are not visible in
 * this capture). */
182 REG_NAME(COMMAND_REG),
183 REG_NAME(STATUS_REG),
186 REG_NAME(CONFIG_REG),
187 REG_NAME(TIMING_REG),
189 REG_NAME(TIMING2_REG),
194 REG_NAME(DMA_MST_CTRL_REG),
195 REG_NAME(DMA_CFG_A_REG),
196 REG_NAME(DMA_CFG_B_REG),
197 REG_NAME(FIFO_CTRL_REG),
198 REG_NAME(DATA_BLOCK_PTR_REG),
199 REG_NAME(TAG_PTR_REG),
200 REG_NAME(ECC_PTR_REG),
201 REG_NAME(DEC_STATUS_REG),
202 REG_NAME(HWSTATUS_CMD_REG),
203 REG_NAME(HWSTATUS_MASK_REG),
/* Body of dump_nand_regs(): walks reg_names[] printing each register via
 * TEGRA_DBG (no-op unless TEGRA_NAND_DEBUG). Signature and loop index
 * increment are not visible in this capture. */
214 TEGRA_DBG("%s: dumping registers\n", __func__);
215 while (reg_names[i].name != NULL) {
216 TEGRA_DBG("%s = 0x%08x\n", reg_names[i].name, readl(reg_names[i].addr));
219 TEGRA_DBG("%s: end of reg dump\n", __func__);
/* Set the given bits in the interrupt-enable register (read-modify-write). */
225 enable_ints(struct tegra_nand_info *info, uint32_t mask)
228 writel(readl(IER_REG) | mask, IER_REG);
/* Clear the given bits in the interrupt-enable register. */
233 disable_ints(struct tegra_nand_info *info, uint32_t mask)
236 writel(readl(IER_REG) & ~mask, IER_REG);
/* Decompose a linear mtd offset into (chip, page-within-chip, column)
 * using the shifts/masks computed in tegra_nand_scan(). The third output
 * parameter (column) is on a continuation line missing from this capture. */
241 split_addr(struct tegra_nand_info *info, loff_t offset, int *chipnr, uint32_t *page,
244 *chipnr = (int)(offset >> info->chip.chip_shift);
245 *page = (offset >> info->chip.page_shift) & info->chip.page_mask;
246 *column = offset & info->chip.column_mask;
/* Controller interrupt handler.
 * Latches ISR/IER/DMA status, completes cmd_complete when a command
 * finishes, records ECC error vectors under ecc_lock, completes
 * dma_complete on DMA-done, logs FIFO under/overruns, and finally acks
 * the interrupts. Local declarations (isr/ier/dma_ctrl/tmp/flags) are on
 * lines missing from this capture. */
251 tegra_nand_irq(int irq, void *dev_id)
253 struct tegra_nand_info *info = dev_id;
259 isr = readl(ISR_REG);
260 ier = readl(IER_REG);
261 dma_ctrl = readl(DMA_MST_CTRL_REG);
262 #ifdef DEBUG_DUMP_IRQ
263 pr_info("IRQ: ISR=0x%08x IER=0x%08x DMA_IS=%d DMA_IE=%d\n",
264 isr, ier, !!(dma_ctrl & (1 << 20)), !!(dma_ctrl & (1 << 28)));
/* Command done: only complete if the GO bit has really dropped. */
266 if (isr & ISR_CMD_DONE) {
267 if (likely(!(readl(COMMAND_REG) & COMMAND_GO)))
268 complete(&info->cmd_complete);
270 pr_err("tegra_nand_irq: Spurious cmd done irq!\n");
273 if (isr & ISR_ECC_ERR) {
274 /* always want to read the decode status so xfers don't stall. */
275 tmp = readl(DEC_STATUS_REG);
277 /* was ECC check actually enabled */
278 if ((ier & IER_ECC_ERR)) {
/* Record the raw decode-status word for later accounting in
 * update_ecc_counts(); bounds assumed checked by max_ecc_errs sizing. */
280 spin_lock_irqsave(&info->ecc_lock, flags);
281 info->ecc_errs[info->num_ecc_errs++] = tmp;
282 spin_unlock_irqrestore(&info->ecc_lock, flags);
/* DMA done: writing the status word back acks the DMA interrupt. */
286 if ((dma_ctrl & DMA_CTRL_IS_DMA_DONE) &&
287 (dma_ctrl & DMA_CTRL_IE_DMA_DONE)) {
288 complete(&info->dma_complete);
289 writel(dma_ctrl, DMA_MST_CTRL_REG);
292 if ((isr & ISR_UND) && (ier & IER_UND))
293 pr_err("%s: fifo underrun.\n", __func__);
295 if ((isr & ISR_OVR) && (ier & IER_OVR))
296 pr_err("%s: fifo overrun.\n", __func__);
298 /* clear ALL interrupts?! */
299 writel(isr & 0xfffc, ISR_REG);
/* Returns 1 when the controller is idle (COMMAND_GO clear), else 0. */
305 tegra_nand_is_cmd_done(struct tegra_nand_info *info)
307 return (readl(COMMAND_REG) & COMMAND_GO) ? 0 : 1;
311 tegra_nand_wait_cmd_done(struct tegra_nand_info *info)
313 uint32_t timeout = (2 * HZ); /* TODO: make this realistic */
316 ret = wait_for_completion_timeout(&info->cmd_complete, timeout);
318 #ifdef TEGRA_NAND_DEBUG_PEDANTIC
319 BUG_ON(!ret && dump_nand_regs());
322 return ret ? 0 : ret;
/* Record the active chip-enable; -1 means "none selected". The actual CE
 * line is encoded into each command via COMMAND_CE(curr_chip). */
326 select_chip(struct tegra_nand_info *info, int chipnr)
328 BUG_ON(chipnr != -1 && chipnr >= info->plat->max_chips);
329 info->chip.curr_chip = chipnr;
/* Program the hardware status-monitor: issue NAND_CMD_STATUS and treat
 * the device as ready when the READY bit is set (masks/expected values
 * below). Used so the controller can poll ready/busy itself. */
333 cfg_hwstatus_mon(struct tegra_nand_info *info)
337 val = (HWSTATUS_RDSTATUS_MASK(1) |
338 HWSTATUS_RDSTATUS_EXP_VAL(0) |
339 HWSTATUS_RBSY_MASK(NAND_STATUS_READY) |
340 HWSTATUS_RBSY_EXP_VAL(NAND_STATUS_READY));
341 writel(NAND_CMD_STATUS, HWSTATUS_CMD_REG);
342 writel(val, HWSTATUS_MASK_REG);
345 /* Tells the NAND controller to initiate the command. */
/* Kicks off the command prepared in info->command_reg (prep_* helpers and
 * CMD/ADDR/CONFIG registers must already be set up) and waits for the
 * command-done interrupt. Error-return path is on lines missing from this
 * capture. Must be called with info->lock held. */
347 tegra_nand_go(struct tegra_nand_info *info)
349 BUG_ON(!tegra_nand_is_cmd_done(info));
/* Re-arm the completion BEFORE setting GO to avoid a lost wakeup. */
351 INIT_COMPLETION(info->cmd_complete);
352 writel(info->command_reg | COMMAND_GO, COMMAND_REG);
354 if (unlikely(tegra_nand_wait_cmd_done(info))) {
355 /* TODO: abort command if needed? */
356 pr_err("%s: Timeout while waiting for command\n", __func__);
360 /* TODO: maybe wait for dma here? */
/* Stage a READ ID command: PIO receive of 4 bytes (TRANS_SIZE(3)) with a
 * single zero address byte, targeting the currently selected chip. */
365 tegra_nand_prep_readid(struct tegra_nand_info *info)
367 info->command_reg = (COMMAND_CLE | COMMAND_ALE | COMMAND_PIO | COMMAND_RX |
368 COMMAND_ALE_BYTE_SIZE(0) | COMMAND_TRANS_SIZE(3) |
369 (COMMAND_CE(info->chip.curr_chip)));
370 writel(NAND_CMD_READID, CMD_REG1);
372 writel(0, ADDR_REG1);
373 writel(0, ADDR_REG2);
374 writel(0, CONFIG_REG);
378 tegra_nand_cmd_readid(struct tegra_nand_info *info, uint32_t *chip_id)
382 #ifdef TEGRA_NAND_DEBUG_PEDANTIC
383 BUG_ON(info->chip.curr_chip == -1);
386 tegra_nand_prep_readid(info);
387 err = tegra_nand_go(info);
391 *chip_id = readl(RESP_REG);
396 /* assumes right locks are held */
/* Issue NAND_CMD_STATUS via PIO and return the low status byte in
 * *status. RBSY_CHK makes the controller wait for ready/busy first. */
398 nand_cmd_get_status(struct tegra_nand_info *info, uint32_t *status)
402 info->command_reg = (COMMAND_CLE | COMMAND_PIO | COMMAND_RX |
403 COMMAND_RBSY_CHK | (COMMAND_CE(info->chip.curr_chip)));
404 writel(NAND_CMD_STATUS, CMD_REG1);
406 writel(0, ADDR_REG1);
407 writel(0, ADDR_REG2);
408 writel(CONFIG_COM_BSY, CONFIG_REG);
410 err = tegra_nand_go(info);
414 *status = readl(RESP_REG) & 0xff;
419 /* must be called with lock held */
/* Bad-block check: consult the in-memory bitmap first; on a miss, read
 * the bad-block marker (first 2 OOB bytes) of the block's first two pages
 * via PIO. A good block is cached in bb_bitmap; bad/unknown stays 0.
 * Return-value conventions and several statements (loop close, returns)
 * are on lines missing from this capture. */
421 check_block_isbad(struct mtd_info *mtd, loff_t offs)
423 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
424 uint32_t block = offs >> info->chip.block_shift;
/* Fast path: bitmap bit set == known good. */
431 if (info->bb_bitmap[BIT_WORD(block)] & BIT_MASK(block))
/* Align to the start of the erase block. */
434 offs &= ~(mtd->erasesize - 1);
436 /* Only set COM_BSY. */
437 /* TODO: should come from board file */
438 writel(CONFIG_COM_BSY, CONFIG_REG);
440 split_addr(info, offs, &chipnr, &page, &column);
441 select_chip(info, chipnr);
443 column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
445 /* check fist two pages of the block */
446 for (i = 0; i < 2; ++i) {
/* PIO read of 2 bytes (TRANS_SIZE(1)) at the OOB offset. */
448 COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
449 COMMAND_ALE_BYTE_SIZE(4) | COMMAND_RX | COMMAND_PIO |
450 COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
452 writel(NAND_CMD_READ0, CMD_REG1);
453 writel(NAND_CMD_READSTART, CMD_REG2);
455 writel(column | ((page & 0xffff) << 16), ADDR_REG1);
456 writel((page >> 16) & 0xff, ADDR_REG2);
458 /* ... poison me ... */
459 writel(0xaa55aa55, RESP_REG);
460 ret = tegra_nand_go(info);
462 pr_info("baaaaaad\n");
/* Non-0xffff marker bytes == factory/driver bad-block mark. */
466 if ((readl(RESP_REG) & 0xffff) != 0xffff) {
471 /* Note: The assumption here is that we cannot cross chip
472 * boundary since the we are only looking at the first 2 pages in
473 * a block, i.e. erasesize > writesize ALWAYS */
478 /* update the bitmap if the block is good */
480 set_bit(block, info->bb_bitmap);
/* mtd->block_isbad entry point: bounds-check, then run check_block_isbad()
 * under the controller mutex and log the outcome. */
486 tegra_nand_block_isbad(struct mtd_info *mtd, loff_t offs)
488 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
491 if (offs >= mtd->size)
494 mutex_lock(&info->lock);
495 ret = check_block_isbad(mtd, offs);
496 mutex_unlock(&info->lock);
500 pr_info("block @ 0x%llx is bad.\n", offs);
502 pr_err("error checking block @ 0x%llx for badness.\n", offs);
/* mtd->block_markbad entry point: clears the block's bit in bb_bitmap,
 * bumps ecc_stats.badblocks, then PIO-writes a zero marker into the OOB
 * of the block's first two pages. Several statements (returns, loop
 * close) are on lines missing from this capture. */
510 tegra_nand_block_markbad(struct mtd_info *mtd, loff_t offs)
512 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
513 uint32_t block = offs >> info->chip.block_shift;
520 if (offs >= mtd->size)
523 pr_info("tegra_nand: setting block %d bad\n", block);
525 mutex_lock(&info->lock);
526 offs &= ~(mtd->erasesize - 1);
528 /* mark the block bad in our bitmap */
529 clear_bit(block, info->bb_bitmap);
530 mtd->ecc_stats.badblocks++;
532 /* Only set COM_BSY. */
533 /* TODO: should come from board file */
534 writel(CONFIG_COM_BSY, CONFIG_REG);
536 split_addr(info, offs, &chipnr, &page, &column);
537 select_chip(info, chipnr);
539 column = mtd->writesize & 0xffff; /* force to be the offset of OOB */
541 /* write to fist two pages in the block */
542 for (i = 0; i < 2; ++i) {
/* PIO program of 2 marker bytes at the OOB offset of each page. */
544 COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
545 COMMAND_ALE_BYTE_SIZE(4) | COMMAND_TX | COMMAND_PIO |
546 COMMAND_TRANS_SIZE(1) | COMMAND_A_VALID | COMMAND_RBSY_CHK |
547 COMMAND_AFT_DAT | COMMAND_SEC_CMD;
548 writel(NAND_CMD_SEQIN, CMD_REG1);
549 writel(NAND_CMD_PAGEPROG, CMD_REG2);
551 writel(column | ((page & 0xffff) << 16), ADDR_REG1);
552 writel((page >> 16) & 0xff, ADDR_REG2);
/* 0x0 marker bytes == "bad". */
554 writel(0x0, RESP_REG);
555 ret = tegra_nand_go(info);
559 /* TODO: check if the program op worked? */
564 mutex_unlock(&info->lock);
/* mtd->erase entry point: validates block alignment and range, then walks
 * the region block by block, skipping bad blocks, issuing ERASE1/ERASE2
 * and checking device status after each erase. Marks instr state and
 * calls mtd_erase_callback() on success; failure path unlocks and sets
 * MTD_ERASE_FAILED. Local declarations and some returns/goto targets are
 * on lines missing from this capture. */
570 tegra_nand_erase(struct mtd_info *mtd, struct erase_info *instr)
572 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
580 TEGRA_DBG("tegra_nand_erase: addr=0x%08llx len=%lld\n", instr->addr,
583 if ((instr->addr + instr->len) > mtd->size) {
584 pr_err("tegra_nand_erase: Can't erase past end of device\n");
585 instr->state = MTD_ERASE_FAILED;
589 if (instr->addr & (mtd->erasesize - 1)) {
590 pr_err("tegra_nand_erase: addr=0x%08llx not block-aligned\n",
592 instr->state = MTD_ERASE_FAILED;
596 if (instr->len & (mtd->erasesize - 1)) {
597 pr_err("tegra_nand_erase: len=%lld not block-aligned\n",
599 instr->state = MTD_ERASE_FAILED;
603 instr->fail_addr = 0xffffffff;
605 mutex_lock(&info->lock);
607 instr->state = MTD_ERASING;
610 num_blocks = instr->len >> info->chip.block_shift;
612 select_chip(info, -1);
614 while (num_blocks--) {
615 split_addr(info, offs, &chipnr, &page, &column);
/* Only reprogram the CE when we cross a chip boundary. */
616 if (chipnr != info->chip.curr_chip)
617 select_chip(info, chipnr);
618 TEGRA_DBG("tegra_nand_erase: addr=0x%08x, page=0x%08x\n", offs, page);
620 if (check_block_isbad(mtd, offs)) {
621 pr_info("%s: skipping bad block @ 0x%08x\n", __func__, offs);
/* Two-byte row address (ALE_BYTE_SIZE(2)) selects the block. */
626 COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
627 COMMAND_ALE_BYTE_SIZE(2) | COMMAND_RBSY_CHK | COMMAND_SEC_CMD;
628 writel(NAND_CMD_ERASE1, CMD_REG1);
629 writel(NAND_CMD_ERASE2, CMD_REG2);
631 writel(page & 0xffffff, ADDR_REG1);
632 writel(0, ADDR_REG2);
633 writel(CONFIG_COM_BSY, CONFIG_REG);
635 if (tegra_nand_go(info) != 0) {
636 instr->fail_addr = offs;
640 /* TODO: do we want a timeout here? */
/* Verify the erase actually succeeded on the device. */
641 if ((nand_cmd_get_status(info, &status) != 0) ||
642 (status & NAND_STATUS_FAIL) ||
643 ((status & NAND_STATUS_READY) != NAND_STATUS_READY)) {
644 instr->fail_addr = offs;
645 pr_info("%s: erase failed @ 0x%08x (stat=0x%08x)\n",
646 __func__, offs, status);
650 offs += mtd->erasesize;
653 instr->state = MTD_ERASE_DONE;
654 mutex_unlock(&info->lock);
655 mtd_erase_callback(instr);
/* Shared failure exit (label line not visible in this capture). */
659 instr->state = MTD_ERASE_FAILED;
660 mutex_unlock(&info->lock);
/* Debug helper: prints the fields of an mtd_oob_ops request.
 * NOTE(review): declared here with a single ops parameter, but the call
 * sites visible at original lines 870/1071 pass (mtd, ops) — one of the
 * two is stale; confirm against the full source before relying on it. */
666 dump_mtd_oob_ops(struct mtd_oob_ops *ops)
668 pr_info("%s: oob_ops: mode=%s len=0x%x ooblen=0x%x "
669 "ooboffs=0x%x dat=0x%p oob=0x%p\n", __func__,
670 (ops->mode == MTD_OOB_AUTO ? "MTD_OOB_AUTO" :
671 (ops->mode == MTD_OOB_PLACE ? "MTD_OOB_PLACE" : "MTD_OOB_RAW")),
672 ops->len, ops->ooblen, ops->ooboffs, ops->datbuf, ops->oobbuf);
/* mtd->read entry point: wraps the buffer in an MTD_OOB_AUTO mtd_oob_ops
 * (no OOB buffer) and delegates to mtd->read_oob. */
676 tegra_nand_read(struct mtd_info *mtd, loff_t from, size_t len,
677 size_t *retlen, uint8_t *buf)
679 struct mtd_oob_ops ops;
682 pr_debug("%s: read: from=0x%llx len=0x%x\n", __func__, from, len);
683 ops.mode = MTD_OOB_AUTO;
687 ret = mtd->read_oob(mtd, from, &ops);
688 *retlen = ops.retlen;
/* A freshly erased (all-0xFF) page triggers spurious ECC errors because
 * blank data does not match the ECC bytes. If every byte of both the
 * data (a_len) and OOB (b_len) regions is 0xFF, drop the recorded errors
 * so they are not charged to ecc_stats. Runs under ecc_lock. Some lines
 * (early-breaks, the suppression itself) are missing from this capture. */
693 correct_ecc_errors_on_blank_page(struct tegra_nand_info *info, u8 *datbuf, u8 *oobbuf, unsigned int a_len, unsigned int b_len) {
698 spin_lock_irqsave(&info->ecc_lock, flags);
699 if (info->num_ecc_errs) {
701 for (i = 0; i < a_len; i++)
702 if (datbuf[i] != 0xFF)
706 for (i = 0; i < b_len; i++)
707 if (oobbuf[i] != 0xFF)
/* All bytes blank: discard the queued error vectors. */
711 info->num_ecc_errs = 0;
713 spin_unlock_irqrestore(&info->ecc_lock, flags);
/* Fold the ECC error vectors captured by the IRQ handler into
 * mtd.ecc_stats: corrected counts from DEC_STATUS_ERR_CNT, failures from
 * the FAIL_A (data) and, when check_oob, FAIL_B (spare) flags. Clears the
 * queue. Runs under ecc_lock. */
717 update_ecc_counts(struct tegra_nand_info *info, int check_oob)
722 spin_lock_irqsave(&info->ecc_lock, flags);
723 for (i = 0; i < info->num_ecc_errs; ++i) {
725 info->mtd.ecc_stats.corrected +=
726 DEC_STATUS_ERR_CNT(info->ecc_errs[i]);
729 if (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_A)
730 info->mtd.ecc_stats.failed++;
731 if (check_oob && (info->ecc_errs[i] & DEC_STATUS_ECC_FAIL_B))
732 info->mtd.ecc_stats.failed++;
734 info->num_ecc_errs = 0;
735 spin_unlock_irqrestore(&info->ecc_lock, flags);
/* Reset the shadow register images before prep_transfer_dma() rebuilds
 * them for the next operation. */
739 clear_regs(struct tegra_nand_info *info)
741 info->command_reg = 0;
742 info->config_reg = 0;
743 info->dmactrl_reg = 0;
/* Build the COMMAND/CONFIG/DMA_MST_CTRL shadow values and program the
 * CMD/ADDR/DMA registers for a one-page DMA transfer.
 *   rx      - 1 == read from flash, 0 == program to flash
 *   do_ecc  - enable HW Reed-Solomon ECC and spare-skip handling
 *   data_dma/data_len, oob_dma/oob_len - DMA targets (either may be 0)
 * The caller still writes config_reg/dmactrl_reg and fires tegra_nand_go.
 * Several branch/else lines are missing from this capture, so the exact
 * A/B-region conditionals should be confirmed against the full source. */
747 prep_transfer_dma(struct tegra_nand_info *info, int rx, int do_ecc, uint32_t page,
748 uint32_t column, dma_addr_t data_dma,
749 uint32_t data_len, dma_addr_t oob_dma, uint32_t oob_len)
751 uint32_t tag_sz = oob_len;
754 pr_info("%s: rx=%d ecc=%d page=%d col=%d data_dma=0x%x "
755 "data_len=0x%08x oob_dma=0x%x ooblen=%d\n", __func__,
756 rx, do_ecc, page, column, data_dma, data_len, oob_dma,
/* Common command bits: 5 address bytes, secondary command, rbsy wait. */
761 COMMAND_CE(info->chip.curr_chip) | COMMAND_CLE | COMMAND_ALE |
762 COMMAND_ALE_BYTE_SIZE(4) | COMMAND_SEC_CMD | COMMAND_RBSY_CHK |
763 COMMAND_TRANS_SIZE(8);
765 info->config_reg = (CONFIG_PAGE_SIZE_SEL(3) | CONFIG_PIPELINE_EN |
768 info->dmactrl_reg = (DMA_CTRL_DMA_GO |
769 DMA_CTRL_DMA_PERF_EN | DMA_CTRL_IE_DMA_DONE |
770 DMA_CTRL_IS_DMA_DONE | DMA_CTRL_BURST_SIZE(4));
/* Read path: READ0/READSTART; write path: SEQIN/PAGEPROG below. */
774 info->config_reg |= CONFIG_HW_ERR_CORRECTION;
775 info->command_reg |= COMMAND_RX;
776 info->dmactrl_reg |= DMA_CTRL_REUSE_BUFFER;
777 writel(NAND_CMD_READ0, CMD_REG1);
778 writel(NAND_CMD_READSTART, CMD_REG2);
780 info->command_reg |= (COMMAND_TX | COMMAND_AFT_DAT);
781 info->dmactrl_reg |= DMA_CTRL_DIR; /* DMA_RD == TX */
782 writel(NAND_CMD_SEQIN, CMD_REG1);
783 writel(NAND_CMD_PAGEPROG, CMD_REG2);
/* Main-data (A) region: enable HW ECC and DMA channel A. */
789 CONFIG_HW_ECC | CONFIG_ECC_SEL | CONFIG_TVALUE(0) |
790 CONFIG_SKIP_SPARE | CONFIG_SKIP_SPARE_SEL(0);
791 info->command_reg |= COMMAND_A_VALID;
792 info->dmactrl_reg |= DMA_CTRL_DMA_EN_A;
793 writel(DMA_CFG_BLOCK_SIZE(data_len - 1), DMA_CFG_A_REG);
794 writel(data_dma, DATA_BLOCK_PTR_REG);
/* No data buffer: point column at the spare area instead. */
796 column = info->mtd.writesize;
798 column += info->mtd.ecclayout->oobfree[0].offset;
799 writel(0, DMA_CFG_A_REG);
800 writel(0, DATA_BLOCK_PTR_REG);
/* OOB/tag (B) region sizing, incl. 4 tag-ECC bytes when ECC is on. */
804 oob_len = info->mtd.oobavail;
805 tag_sz = info->mtd.oobavail;
807 tag_sz += 4; /* size of tag ecc */
809 oob_len += 4; /* size of tag ecc */
810 info->config_reg |= CONFIG_ECC_EN_TAG;
813 oob_len += 4; /* num of skipped bytes */
815 info->command_reg |= COMMAND_B_VALID;
816 info->config_reg |= CONFIG_TAG_BYTE_SIZE(tag_sz - 1);
817 info->dmactrl_reg |= DMA_CTRL_DMA_EN_B;
818 writel(DMA_CFG_BLOCK_SIZE(oob_len - 1), DMA_CFG_B_REG);
819 writel(oob_dma, TAG_PTR_REG);
821 writel(0, DMA_CFG_B_REG);
822 writel(0, TAG_PTR_REG);
/* Address: column in low half of ADDR_REG1, 24-bit page split across. */
825 writel((column & 0xffff) | ((page & 0xffff) << 16), ADDR_REG1);
826 writel((page >> 16) & 0xff, ADDR_REG2);
/* Map a kernel buffer for DMA. Handles both lowmem (virt_to_page) and
 * vmalloc buffers (vmalloc_to_page), but a vmalloc mapping must not cross
 * a page boundary — WARN and fail if size+offset exceeds PAGE_SIZE. */
830 tegra_nand_dma_map(struct device *dev, void *addr, size_t size,
831 enum dma_data_direction dir)
834 unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
835 if (virt_addr_valid(addr))
836 page = virt_to_page(addr);
838 if (WARN_ON(size + offset > PAGE_SIZE))
840 page = vmalloc_to_page(addr);
842 return dma_map_page(dev, page, offset, size, dir);
845 /* if mode == RAW, then we read data only, with no ECC
846 * if mode == PLACE, we read ONLY the OOB data from a raw offset into the spare
848 * if mode == AUTO, we read main data and the OOB data from the oobfree areas as
849 * specified by nand_ecclayout.
/* Core page-read loop shared by mtd->read and mtd->read_oob: maps the
 * caller's data buffer for DMA per page, bounces OOB through the
 * coherent oob_dma_buf, waits on the DMA completion, suppresses
 * blank-page ECC errors, and accumulates retlen/oobretlen and ecc_stats.
 * Numerous lines (declarations, else arms, returns, error labels) are
 * missing from this capture. Called with no lock; takes info->lock. */
852 do_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
854 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
855 struct mtd_ecc_stats old_ecc_stats;
859 uint8_t *datbuf = ops->datbuf;
860 uint8_t *oobbuf = ops->oobbuf;
/* Treat a NULL buffer as a zero-length request for that region. */
861 uint32_t len = datbuf ? ops->len : 0;
862 uint32_t ooblen = oobbuf ? ops->ooblen : 0;
867 dma_addr_t datbuf_dma_addr = 0;
870 dump_mtd_oob_ops(mtd, ops);
876 /* TODO: Worry about reads from non-page boundaries later */
877 if (unlikely(from & info->chip.column_mask)) {
878 pr_err("%s: Unaligned read (from 0x%llx) not supported\n",
883 if (likely(ops->mode == MTD_OOB_AUTO)) {
884 oobsz = mtd->oobavail;
886 oobsz = mtd->oobsize;
/* OOB reads are limited to one page's worth of spare data. */
890 if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
891 pr_err("%s: can't read OOB from multiple pages (%d > %d)\n", __func__,
894 } else if (ops->oobbuf) {
/* At least one page even for OOB-only requests (len == 0). */
897 page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
900 mutex_lock(&info->lock);
902 memcpy(&old_ecc_stats, &mtd->ecc_stats, sizeof(old_ecc_stats));
905 enable_ints(info, IER_ECC_ERR);
906 writel(info->ecc_addr, ECC_PTR_REG);
908 disable_ints(info, IER_ECC_ERR);
910 split_addr(info, from, &chipnr, &page, &column);
911 select_chip(info, chipnr);
913 /* reset it to point back to beginning of page */
916 while (page_count--) {
917 int a_len = min(mtd->writesize - column, len);
918 int b_len = min(oobsz, ooblen);
921 pr_info("%s: chip:=%d page=%d col=%d\n", __func__, chipnr,
927 datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_FROM_DEVICE);
929 prep_transfer_dma(info, 1, do_ecc, page, column, datbuf_dma_addr,
930 a_len, info->oob_dma_addr,
932 writel(info->config_reg, CONFIG_REG);
933 writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
935 INIT_COMPLETION(info->dma_complete);
936 err = tegra_nand_go(info);
940 if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
941 pr_err("%s: dma completion timeout\n", __func__);
947 /*pr_info("tegra_read_oob: DMA complete\n");*/
949 /* if we are here, transfer is done */
951 dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_FROM_DEVICE);
/* Copy OOB out of the bounce buffer; when data was also read, the
 * first 4 bytes of the tag buffer are skipped bytes. */
954 uint32_t ofs = datbuf && oobbuf ? 4 : 0; /* skipped bytes */
955 memcpy(oobbuf, info->oob_dma_buf + ofs, b_len);
958 correct_ecc_errors_on_blank_page(info, datbuf, oobbuf, a_len, b_len);
963 ops->retlen += a_len;
969 ops->oobretlen += b_len;
972 update_ecc_counts(info, oobbuf != NULL);
/* Advance to the next page (and possibly next chip). */
977 from += mtd->writesize;
980 split_addr(info, from, &chipnr, &page, &column);
981 if (chipnr != info->chip.curr_chip)
982 select_chip(info, chipnr);
985 disable_ints(info, IER_ECC_ERR);
/* Report uncorrectable vs corrected errors via the return value. */
987 if (mtd->ecc_stats.failed != old_ecc_stats.failed)
989 else if (mtd->ecc_stats.corrected != old_ecc_stats.corrected)
994 mutex_unlock(&info->lock);
/* Error exit (label line not visible in this capture). */
1001 disable_ints(info, IER_ECC_ERR);
1002 mutex_unlock(&info->lock);
1006 /* just does some parameter checking and calls do_read_oob */
/* mtd->read_oob entry point: rejects reads past the device end, empty OOB
 * requests, mixed data+OOB in non-AUTO mode, and RAW-mode OOB-only reads,
 * then delegates to do_read_oob(). */
1008 tegra_nand_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1010 if (ops->datbuf && unlikely((from + ops->len) > mtd->size)) {
1011 pr_err("%s: Can't read past end of device.\n", __func__);
1015 if (unlikely(ops->oobbuf && !ops->ooblen)) {
1016 pr_err("%s: Reading 0 bytes from OOB is meaningless\n", __func__);
1020 if (unlikely(ops->mode != MTD_OOB_AUTO)) {
1021 if (ops->oobbuf && ops->datbuf) {
1022 pr_err("%s: can't read OOB + Data in non-AUTO mode.\n",
1026 if ((ops->mode == MTD_OOB_RAW) && !ops->datbuf) {
1027 pr_err("%s: Raw mode only supports reading data area.\n",
1033 return do_read_oob(mtd, from, ops);
/* mtd->write entry point: wraps the buffer in an MTD_OOB_AUTO mtd_oob_ops
 * (no OOB buffer) and delegates to mtd->write_oob. The const cast is
 * needed because mtd_oob_ops.datbuf is not const-qualified. */
1037 tegra_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1038 size_t *retlen, const uint8_t *buf)
1040 struct mtd_oob_ops ops;
1043 pr_debug("%s: write: to=0x%llx len=0x%x\n", __func__, to, len);
1044 ops.mode = MTD_OOB_AUTO;
1046 ops.datbuf = (uint8_t *)buf;
1048 ret = mtd->write_oob(mtd, to, &ops);
1049 *retlen = ops.retlen;
/* Core page-program loop shared by mtd->write and mtd->write_oob: mirrors
 * do_read_oob() but in the TX direction — maps the data buffer per page,
 * stages OOB into the coherent bounce buffer, and waits on DMA completion.
 * Declarations, else arms and error labels are on lines missing from this
 * capture. Takes info->lock for the duration. */
1054 do_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
1056 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
1060 uint8_t *datbuf = ops->datbuf;
1061 uint8_t *oobbuf = ops->oobbuf;
1062 uint32_t len = datbuf ? ops->len : 0;
1063 uint32_t ooblen = oobbuf ? ops->ooblen : 0;
1065 uint32_t page_count;
1068 dma_addr_t datbuf_dma_addr = 0;
1071 dump_mtd_oob_ops(mtd, ops);
1081 if (likely(ops->mode == MTD_OOB_AUTO)) {
1082 oobsz = mtd->oobavail;
1084 oobsz = mtd->oobsize;
1088 if (unlikely(ops->oobbuf && ops->ooblen > oobsz)) {
1089 pr_err("%s: can't write OOB to multiple pages (%d > %d)\n",
1090 __func__, ops->ooblen, oobsz);
1092 } else if (ops->oobbuf) {
1095 page_count = max((uint32_t)(ops->len / mtd->writesize), (uint32_t)1);
1097 mutex_lock(&info->lock);
1099 split_addr(info, to, &chipnr, &page, &column);
1100 select_chip(info, chipnr);
1102 while (page_count--) {
1103 int a_len = min(mtd->writesize, len);
1104 int b_len = min(oobsz, ooblen);
1107 datbuf_dma_addr = tegra_nand_dma_map(info->dev, datbuf, a_len, DMA_TO_DEVICE);
/* Stage OOB bytes into the coherent bounce buffer before DMA. */
1109 memcpy(info->oob_dma_buf, oobbuf, b_len);
1112 prep_transfer_dma(info, 0, do_ecc, page, column, datbuf_dma_addr,
1113 a_len, info->oob_dma_addr, b_len);
1115 writel(info->config_reg, CONFIG_REG);
1116 writel(info->dmactrl_reg, DMA_MST_CTRL_REG);
1118 INIT_COMPLETION(info->dma_complete);
1119 err = tegra_nand_go(info);
1123 if (!wait_for_completion_timeout(&info->dma_complete, 2*HZ)) {
1124 pr_err("%s: dma completion timeout\n", __func__);
1130 dma_unmap_page(info->dev, datbuf_dma_addr, a_len, DMA_TO_DEVICE);
1133 ops->retlen += a_len;
1138 ops->oobretlen += b_len;
/* Advance to the next page (and possibly next chip). */
1144 to += mtd->writesize;
1147 split_addr(info, to, &chipnr, &page, &column);
1148 if (chipnr != info->chip.curr_chip)
1149 select_chip(info, chipnr);
1152 mutex_unlock(&info->lock);
/* Error exit (label line not visible in this capture). */
1159 mutex_unlock(&info->lock);
/* mtd->write_oob entry point: requires page-aligned destination and a
 * non-empty OOB buffer when one is supplied, then delegates to
 * do_write_oob(). */
1164 tegra_nand_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
1166 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
1168 if (unlikely(to & info->chip.column_mask)) {
1169 pr_err("%s: Unaligned write (to 0x%llx) not supported\n",
1174 if (unlikely(ops->oobbuf && !ops->ooblen)) {
1175 pr_err("%s: Writing 0 bytes to OOB is meaningless\n", __func__);
1179 return do_write_oob(mtd, to, ops);
/* Power-management hooks wired into mtd->suspend/resume; their bodies are
 * on lines missing from this capture (presumably stubs). */
1183 tegra_nand_suspend(struct mtd_info *mtd)
1189 tegra_nand_resume(struct mtd_info *mtd)
/* Populate bb_bitmap at probe time by running block_isbad over every
 * erase block: bit set for good blocks, cleared for bad/unknown; a
 * negative result from block_isbad aborts with an error. */
1194 scan_bad_blocks(struct tegra_nand_info *info)
1196 struct mtd_info *mtd = &info->mtd;
1197 int num_blocks = mtd->size >> info->chip.block_shift;
1201 for (block = 0; block < num_blocks; ++block) {
1202 /* make sure the bit is cleared, meaning it's bad/unknown before
1204 clear_bit(block, info->bb_bitmap);
1205 is_bad = mtd->block_isbad(mtd, block << info->chip.block_shift);
1208 set_bit(block, info->bb_bitmap);
1209 else if (is_bad > 0)
1210 pr_info("block 0x%08x is bad.\n", block);
1212 pr_err("Fatal error (%d) while scanning for "
1213 "bad blocks\n", is_bad);
/* Program TIMING/TIMING2 from the first entry of the board-supplied
 * chip_parms table. CNT() converts nanoseconds to NAND clock cycles
 * (rounding up, minus one) assuming a hard-coded 108 MHz clock — both
 * TODOs below note this should come from the clock subsystem and the
 * per-device table lookup. */
1221 set_chip_timing(struct tegra_nand_info *info)
1223 struct tegra_nand_chip_parms *chip_parms = &info->plat->chip_parms[0];
1226 /* TODO: Actually search the chip_parms list for the correct device. */
1227 /* TODO: Get the appropriate frequency from the clock subsystem */
1228 #define NAND_CLK_FREQ 108000
1229 #define CNT(t) (((((t) * NAND_CLK_FREQ) + 1000000 - 1) / 1000000) - 1)
1230 tmp = (TIMING_TRP_RESP(CNT(chip_parms->timing.trp_resp)) |
1231 TIMING_TWB(CNT(chip_parms->timing.twb)) |
1232 TIMING_TCR_TAR_TRR(CNT(chip_parms->timing.tcr_tar_trr)) |
1233 TIMING_TWHR(CNT(chip_parms->timing.twhr)) |
1234 TIMING_TCS(CNT(chip_parms->timing.tcs)) |
1235 TIMING_TWH(CNT(chip_parms->timing.twh)) |
1236 TIMING_TWP(CNT(chip_parms->timing.twp)) |
1237 TIMING_TRH(CNT(chip_parms->timing.trh)) |
1238 TIMING_TRP(CNT(chip_parms->timing.trp)));
1239 writel(tmp, TIMING_REG);
1240 writel(TIMING2_TADL(CNT(chip_parms->timing.tadl)), TIMING2_REG);
1242 #undef NAND_CLK_FREQ
1245 /* Scans for nand flash devices, identifies them, and fills in the
/* Probe-time device identification: READ ID on chip 0, decode the 4 id
 * bytes into geometry (page size, spare size, erase size), count
 * identical chips, reject unsupported page sizes, and wire up all
 * mtd_info callbacks. Returns nonzero on any failure (error label not
 * visible in this capture). */
1248 tegra_nand_scan(struct mtd_info *mtd, int maxchips)
1250 struct tegra_nand_info *info = MTD_TO_INFO(mtd);
1251 struct nand_flash_dev *dev_info;
1252 struct nand_manufacturers *vendor_info;
/* Use safe scan timings until the real part is identified. */
1261 writel(SCAN_TIMING_VAL, TIMING_REG);
1262 writel(SCAN_TIMING2_VAL, TIMING2_REG);
1263 writel(0, CONFIG_REG);
1265 select_chip(info, 0);
1266 err = tegra_nand_cmd_readid(info, &tmp);
/* READ ID byte order: vendor, device, MLC parms, device parms. */
1270 vendor_id = tmp & 0xff;
1271 dev_id = (tmp >> 8) & 0xff;
1272 mlc_parms = (tmp >> 16) & 0xff;
1273 dev_parms = (tmp >> 24) & 0xff;
1275 dev_info = find_nand_flash_device(dev_id);
1276 if (dev_info == NULL) {
1277 pr_err("%s: unknown flash device id (0x%02x) found.\n", __func__,
1283 vendor_info = find_nand_flash_vendor(vendor_id);
1284 if (vendor_info == NULL) {
1285 pr_err("%s: unknown flash vendor id (0x%02x) found.\n", __func__,
1291 /* loop through and see if we can find more devices */
1292 for (cnt = 1; cnt < info->plat->max_chips; ++cnt) {
1293 select_chip(info, cnt);
1294 /* TODO: figure out what to do about errors here */
1295 err = tegra_nand_cmd_readid(info, &tmp);
/* Stop at the first chip that doesn't match chip 0's id. */
1298 if ((dev_id != ((tmp >> 8) & 0xff)) ||
1299 (vendor_id != (tmp & 0xff)))
1303 pr_info("%s: %d NAND chip(s) found (vend=0x%02x, dev=0x%02x) (%s %s)\n",
1304 DRIVER_NAME, cnt, vendor_id, dev_id, vendor_info->name,
1306 info->chip.num_chips = cnt;
1307 info->chip.chipsize = dev_info->chipsize << 20;
1308 mtd->size = info->chip.num_chips * info->chip.chipsize;
1310 /* format of 4th id byte returned by READ ID
1312 * bit 6 = bus width. 1 == 16bit, 0 == 8bit
1313 * bits 5:4 = data block size. 64kb * (2^val)
1315 * bit 2 = spare area size / 512 bytes. 0 == 8bytes, 1 == 16bytes
1316 * bits 1:0 = page size. 1kb * (2^val)
1319 /* TODO: we should reconcile the information read from chip and
1320 * the data given to us in tegra_nand_platform->chip_parms??
1321 * platform data will give us timing information. */
/* Page size from id bits 1:0. */
1324 tmp = dev_parms & 0x3;
1325 mtd->writesize = 1024 << tmp;
1326 info->chip.column_mask = mtd->writesize - 1;
1328 /* Note: See oob layout description of why we only support 2k pages. */
1329 if (mtd->writesize > 2048) {
1330 pr_err("%s: Large page devices with pagesize > 2kb are NOT "
1331 "supported\n", __func__);
1333 } else if (mtd->writesize < 2048) {
1334 pr_err("%s: Small page devices are NOT supported\n", __func__);
1338 /* spare area, must be at least 64 bytes */
1339 tmp = (dev_parms >> 2) & 0x1;
1340 tmp = (8 << tmp) * (mtd->writesize / 512);
1342 pr_err("%s: Spare area (%d bytes) too small\n", __func__, tmp);
1346 mtd->oobavail = tegra_nand_oob_64.oobavail;
1348 /* data block size (erase size) (w/o spare) */
1349 tmp = (dev_parms >> 4) & 0x3;
1350 mtd->erasesize = (64 * 1024) << tmp;
1351 info->chip.block_shift = ffs(mtd->erasesize) - 1;
1353 /* used to select the appropriate chip/page in case multiple devices
1355 info->chip.chip_shift = ffs(info->chip.chipsize) - 1;
1356 info->chip.page_shift = ffs(mtd->writesize) - 1;
1357 info->chip.page_mask =
1358 (info->chip.chipsize >> info->chip.page_shift) - 1;
1360 /* now fill in the rest of the mtd fields */
1361 mtd->ecclayout = &tegra_nand_oob_64;
1362 mtd->type = MTD_NANDFLASH;
1363 mtd->flags = MTD_CAP_NANDFLASH;
1365 mtd->erase = tegra_nand_erase;
1368 mtd->unpoint = NULL;
1369 mtd->read = tegra_nand_read;
1370 mtd->write = tegra_nand_write;
1371 mtd->read_oob = tegra_nand_read_oob;
1372 mtd->write_oob = tegra_nand_write_oob;
1374 mtd->resume = tegra_nand_resume;
1375 mtd->suspend = tegra_nand_suspend;
1376 mtd->block_isbad = tegra_nand_block_isbad;
1377 mtd->block_markbad = tegra_nand_block_markbad;
1379 /* TODO: should take vendor_id/device_id */
1380 set_chip_timing(info);
/* Error exit (label line not visible in this capture). */
1385 pr_err("%s: NAND device scan aborted due to error(s).\n", __func__);
/* Platform-device probe: allocates driver state, DMA bounce buffers and
 * the ECC error buffer, requests the IRQ, enables the controller clock,
 * scans/identifies the flash, allocates the ECC error vector array and
 * bad-block bitmap, scans bad blocks, and registers partitions (or the
 * whole device). Unwinds via the out_free_* labels (some label lines are
 * missing from this capture). */
1389 static int __devinit
1390 tegra_nand_probe(struct platform_device *pdev)
1392 struct tegra_nand_platform *plat = pdev->dev.platform_data;
1393 struct tegra_nand_info *info = NULL;
1394 struct tegra_nand_chip *chip = NULL;
1395 struct mtd_info *mtd = NULL;
1397 uint64_t num_erase_blocks;
1399 pr_debug("%s: probing (%p)\n", __func__, pdev);
1402 pr_err("%s: no platform device info\n", __func__);
1404 } else if (!plat->chip_parms) {
1405 pr_err("%s: no platform nand parms\n", __func__);
1409 info = kzalloc(sizeof(struct tegra_nand_info), GFP_KERNEL);
1411 pr_err("%s: no memory for flash info\n", __func__);
1415 info->dev = &pdev->dev;
1418 platform_set_drvdata(pdev, info);
1420 init_completion(&info->cmd_complete);
1421 init_completion(&info->dma_complete);
1423 mutex_init(&info->lock);
1424 spin_lock_init(&info->ecc_lock);
1427 chip->priv = &info->mtd;
1428 chip->curr_chip = -1;
1431 mtd->name = dev_name(&pdev->dev);
1432 mtd->priv = &info->chip;
1433 mtd->owner = THIS_MODULE;
1435 /* HACK: allocate a dma buffer to hold 1 page oob data */
1436 info->oob_dma_buf = dma_alloc_coherent(NULL, 64,
1437 &info->oob_dma_addr, GFP_KERNEL);
1438 if (!info->oob_dma_buf) {
1443 /* this will store the ecc error vector info */
1444 info->ecc_buf = dma_alloc_coherent(NULL, ECC_BUF_SZ, &info->ecc_addr,
1446 if (!info->ecc_buf) {
1448 goto out_free_dma_buf;
1452 if (!(pdev->resource[0].flags & IORESOURCE_IRQ)) {
1453 pr_err("NAND IRQ resource not defined\n");
1455 goto out_free_ecc_buf;
1458 err = request_irq(pdev->resource[0].start, tegra_nand_irq,
1459 IRQF_SHARED, DRIVER_NAME, info);
1461 pr_err("Unable to request IRQ %d (%d)\n",
1462 pdev->resource[0].start, err);
1463 goto out_free_ecc_buf;
1466 /* TODO: configure pinmux here?? */
/* NOTE(review): clk_get() result is not error-checked before
 * clk_set_rate() — confirm against the full source. */
1467 info->clk = clk_get(&pdev->dev, NULL);
1468 clk_set_rate(info->clk, 108000000);
1470 cfg_hwstatus_mon(info);
1472 /* clear all pending interrupts */
1473 writel(readl(ISR_REG), ISR_REG);
1475 /* clear dma interrupt */
1476 writel(DMA_CTRL_IS_DMA_DONE, DMA_MST_CTRL_REG);
1478 /* enable interrupts */
1479 disable_ints(info, 0xffffffff);
1480 enable_ints(info, IER_ERR_TRIG_VAL(4) | IER_UND | IER_OVR | IER_CMD_DONE |
1481 IER_ECC_ERR | IER_GIE);
1483 if (tegra_nand_scan(mtd, plat->max_chips)) {
1487 pr_info("%s: NVIDIA Tegra NAND controller @ base=0x%08x irq=%d.\n",
1488 DRIVER_NAME, TEGRA_NAND_PHYS, pdev->resource[0].start);
1490 /* allocate memory to hold the ecc error info */
1491 info->max_ecc_errs = MAX_DMA_SZ / mtd->writesize;
1492 info->ecc_errs = kmalloc(info->max_ecc_errs * sizeof(uint32_t),
1494 if (!info->ecc_errs) {
1499 /* alloc the bad block bitmap */
1500 num_erase_blocks = mtd->size;
1501 do_div(num_erase_blocks, mtd->erasesize);
1502 info->bb_bitmap = kzalloc(BITS_TO_LONGS(num_erase_blocks) *
1503 sizeof(unsigned long), GFP_KERNEL);
1504 if (!info->bb_bitmap) {
1509 err = scan_bad_blocks(info);
1511 goto out_free_bbbmap;
1517 #ifdef CONFIG_MTD_PARTITIONS
/* Prefer cmdline partitions, then board-file partitions, else whole mtd. */
1518 err = parse_mtd_partitions(mtd, part_probes, &info->parts, 0);
1520 err = add_mtd_partitions(mtd, info->parts, err);
1521 } else if (err <= 0 && plat->parts) {
1522 err = add_mtd_partitions(mtd, plat->parts, plat->nr_parts);
1525 err = add_mtd_device(mtd);
1527 goto out_free_bbbmap;
1529 dev_set_drvdata(&pdev->dev, info);
1531 pr_debug("%s: probe done.\n", __func__);
/* Error unwind ladder (labels themselves not visible in this capture). */
1535 kfree(info->bb_bitmap);
1538 kfree(info->ecc_errs);
1541 disable_ints(info, 0xffffffff);
1542 free_irq(pdev->resource[0].start, info);
1545 dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
1548 dma_free_coherent(NULL, 64, info->oob_dma_buf,
1549 info->oob_dma_addr);
1552 platform_set_drvdata(pdev, NULL);
/* Platform-device removal: releases the IRQ and frees the bitmap, ECC
 * array and DMA buffers allocated in probe.
 * NOTE(review): the oob_dma_buf free size here (writesize + oobsize)
 * does not match the 64-byte allocation visible in probe — confirm
 * against the full source; one of the two looks stale. */
1558 static int __devexit
1559 tegra_nand_remove(struct platform_device *pdev)
1561 struct tegra_nand_info *info = dev_get_drvdata(&pdev->dev);
1563 dev_set_drvdata(&pdev->dev, NULL);
1566 free_irq(pdev->resource[0].start, info);
1567 kfree(info->bb_bitmap);
1568 kfree(info->ecc_errs);
1569 dma_free_coherent(NULL, ECC_BUF_SZ, info->ecc_buf, info->ecc_addr);
1570 dma_free_coherent(NULL, info->mtd.writesize + info->mtd.oobsize,
1571 info->oob_dma_buf, info->oob_dma_addr);
/* Platform-driver registration glue; matches devices named "tegra_nand". */
1578 static struct platform_driver tegra_nand_driver = {
1579 .probe = tegra_nand_probe,
1580 .remove = __devexit_p(tegra_nand_remove),
1584 .name = "tegra_nand",
1585 .owner = THIS_MODULE,
/* Module entry/exit: register/unregister the platform driver. */
1590 tegra_nand_init(void)
1592 return platform_driver_register(&tegra_nand_driver);
1596 tegra_nand_exit(void)
1598 platform_driver_unregister(&tegra_nand_driver);
1601 module_init(tegra_nand_init);
1602 module_exit(tegra_nand_exit);
1604 MODULE_LICENSE("GPL");
1605 MODULE_DESCRIPTION(DRIVER_DESC);