/*
 * MTD device concatenation layer
 *
 * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
 * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
 *
 * NAND support by Christian Gan <cgan@iders.ca>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/backing-dev.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/concat.h>

#include <asm/div64.h>
/*
 * Our storage structure:
 * Subdev points to an array of pointers to struct mtd_info objects
 * which is allocated along with this structure
 */
struct mtd_concat {
	struct mtd_info mtd;
	int num_subdev;
	struct mtd_info **subdev;
};
/*
 * how to calculate the size required for the above structure,
 * including the pointer array subdev points to:
 */
#define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)	\
	((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
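
/*
 * Layout sketch (editorial, illustrative only): the struct and the
 * subdev pointer array share a single allocation, so for
 * num_subdev == 2 the kzalloc()ed block looks like:
 *
 *	+--------------------+ <- concat
 *	| struct mtd_concat  |
 *	+--------------------+ <- concat + 1 == concat->subdev
 *	| subdev[0]          |
 *	| subdev[1]          |
 *	+--------------------+
 *
 * which is why mtd_concat_create() below simply sets
 * concat->subdev = (struct mtd_info **) (concat + 1).
 */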
/*
 * Given a pointer to the MTD object in the mtd_concat structure,
 * we can retrieve the pointer to that structure with this macro.
 */
#define CONCAT(x)  ((struct mtd_concat *)(x))
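
/*
 * Editorial note: the cast is only valid because the embedded
 * struct mtd_info is the first member of struct mtd_concat, so
 * &concat->mtd == (struct mtd_info *)concat.  Every method below
 * therefore starts with:
 *
 *	struct mtd_concat *concat = CONCAT(mtd);
 */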
/*
 * MTD methods which look up the relevant subdevice, translate the
 * effective address and pass through to the subdevice.
 */
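
/*
 * Worked example (editorial; sizes are made up): with two 8 MiB
 * subdevices, a 1 MiB read at offset 10 MiB skips subdev[0]
 * (from -= 8 MiB) and is issued to subdev[1] at the translated
 * offset 2 MiB.  A request crossing a subdevice boundary is split:
 * the first chunk is clamped to the end of the current subdevice,
 * the remainder continues at offset 0 of the next one.
 */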
static int
concat_read(struct mtd_info *mtd, loff_t from, size_t len,
	    size_t * retlen, u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int ret = 0, err;
	int i;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (from >= subdev->size) {
			/* Not destined for this subdev */
			size = 0;
			from -= subdev->size;
			continue;
		}
		if (from + len > subdev->size)
			/* First part goes into this subdev */
			size = subdev->size - from;
		else
			/* Entire transaction goes into this subdev */
			size = len;

		err = mtd_read(subdev, from, size, &retsize, buf);

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		*retlen += retsize;
		len -= size;
		if (len == 0)
			return ret;

		buf += size;
		from = 0;
	}
	return -EINVAL;
}
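
/*
 * Editorial note on the error handling above: each subdevice's ECC
 * result is folded into the super device's ecc_stats, and `ret` keeps
 * the most severe status for the caller -- once a hard ECC error
 * (-EBADMSG) has been recorded, a later -EUCLEAN bitflip report from
 * another subdevice must not overwrite it.
 */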
static int
concat_write(struct mtd_info *mtd, loff_t to, size_t len,
	     size_t * retlen, const u_char * buf)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int err = -EINVAL;
	int i;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, retsize;

		if (to >= subdev->size) {
			size = 0;
			to -= subdev->size;
			continue;
		}
		if (to + len > subdev->size)
			size = subdev->size - to;
		else
			size = len;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = mtd_write(subdev, to, size, &retsize, buf);

		if (err)
			break;

		*retlen += retsize;
		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		buf += size;
		to = 0;
	}
	return err;
}
static int
concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
	      unsigned long count, loff_t to, size_t * retlen)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct kvec *vecs_copy;
	unsigned long entry_low, entry_high;
	size_t total_len = 0;
	int i;
	int err = -EINVAL;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	*retlen = 0;

	/* Calculate total length of data */
	for (i = 0; i < count; i++)
		total_len += vecs[i].iov_len;

	/* Check alignment */
	if (mtd->writesize > 1) {
		uint64_t __to = to;
		if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
			return -EINVAL;
	}

	/* make a copy of vecs */
	vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
	if (!vecs_copy)
		return -ENOMEM;

	entry_low = 0;
	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		size_t size, wsize, retsize, old_iov_len;

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		size = min_t(uint64_t, total_len, subdev->size - to);
		wsize = size;	/* store for future use */

		entry_high = entry_low;
		while (entry_high < count) {
			if (size <= vecs_copy[entry_high].iov_len)
				break;
			size -= vecs_copy[entry_high++].iov_len;
		}

		old_iov_len = vecs_copy[entry_high].iov_len;
		vecs_copy[entry_high].iov_len = size;

		if (!(subdev->flags & MTD_WRITEABLE))
			err = -EROFS;
		else
			err = mtd_writev(subdev, &vecs_copy[entry_low],
					 entry_high - entry_low + 1, to,
					 &retsize);

		vecs_copy[entry_high].iov_len = old_iov_len - size;
		vecs_copy[entry_high].iov_base += size;

		entry_low = entry_high;

		if (err)
			break;

		*retlen += retsize;
		total_len -= wsize;

		if (total_len == 0)
			break;

		err = -EINVAL;
		to = 0;
	}

	kfree(vecs_copy);
	return err;
}
static int
concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err, ret = 0;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (from >= subdev->size) {
			from -= subdev->size;
			continue;
		}

		/* partial read ? */
		if (from + devops.len > subdev->size)
			devops.len = subdev->size - from;

		err = mtd_read_oob(subdev, from, &devops);
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;

		/* Save information about bitflips! */
		if (unlikely(err)) {
			if (mtd_is_eccerr(err)) {
				mtd->ecc_stats.failed++;
				ret = err;
			} else if (mtd_is_bitflip(err)) {
				mtd->ecc_stats.corrected++;
				/* Do not overwrite -EBADMSG !! */
				if (!ret)
					ret = err;
			} else
				return err;
		}

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return ret;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return ret;
			devops.oobbuf += ops->oobretlen;
		}

		from = 0;
	}
	return -EINVAL;
}
static int
concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_oob_ops devops = *ops;
	int i, err;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	ops->retlen = ops->oobretlen = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (to >= subdev->size) {
			to -= subdev->size;
			continue;
		}

		/* partial write ? */
		if (to + devops.len > subdev->size)
			devops.len = subdev->size - to;

		err = mtd_write_oob(subdev, to, &devops);
		/* accumulate data and OOB byte counts separately */
		ops->retlen += devops.retlen;
		ops->oobretlen += devops.oobretlen;
		if (err)
			return err;

		if (devops.datbuf) {
			devops.len = ops->len - ops->retlen;
			if (!devops.len)
				return 0;
			devops.datbuf += devops.retlen;
		}
		if (devops.oobbuf) {
			devops.ooblen = ops->ooblen - ops->oobretlen;
			if (!devops.ooblen)
				return 0;
			devops.oobbuf += devops.oobretlen;
		}
		to = 0;
	}
	return -EINVAL;
}
static void concat_erase_callback(struct erase_info *instr)
{
	wake_up((wait_queue_head_t *) instr->priv);
}
static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	int err;
	wait_queue_head_t waitq;
	DECLARE_WAITQUEUE(wait, current);

	/*
	 * This code was stol^H^H^H^Hinspired by mtdchar.c
	 */
	init_waitqueue_head(&waitq);

	erase->mtd = mtd;
	erase->callback = concat_erase_callback;
	erase->priv = (unsigned long) &waitq;

	/*
	 * FIXME: Allow INTERRUPTIBLE. Which means
	 * not having the wait_queue head on the stack.
	 */
	err = mtd_erase(mtd, erase);
	if (!err) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&waitq, &wait);
		if (erase->state != MTD_ERASE_DONE
		    && erase->state != MTD_ERASE_FAILED)
			schedule();
		remove_wait_queue(&waitq, &wait);
		set_current_state(TASK_RUNNING);

		err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
	}
	return err;
}
static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct mtd_concat *concat = CONCAT(mtd);
	struct mtd_info *subdev;
	int i, err;
	uint64_t length, offset = 0;
	struct erase_info *erase;

	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;

	/*
	 * Check for proper erase block alignment of the to-be-erased area.
	 * It is easier to do this based on the super device's erase
	 * region info rather than looking at each particular sub-device
	 * independently.
	 */
	if (!concat->mtd.numeraseregions) {
		/* the easy case: device has uniform erase block size */
		if (instr->addr & (concat->mtd.erasesize - 1))
			return -EINVAL;
		if (instr->len & (concat->mtd.erasesize - 1))
			return -EINVAL;
	} else {
		/* device has variable erase size */
		struct mtd_erase_region_info *erase_regions =
		    concat->mtd.eraseregions;

		/*
		 * Find the erase region where the to-be-erased area begins:
		 */
		for (i = 0; i < concat->mtd.numeraseregions &&
		     instr->addr >= erase_regions[i].offset; i++) ;
		--i;

		/*
		 * Now erase_regions[i] is the region in which the
		 * to-be-erased area begins. Verify that the starting
		 * offset is aligned to this region's erase size:
		 */
		if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
			return -EINVAL;

		/*
		 * now find the erase region where the to-be-erased area ends:
		 */
		for (; i < concat->mtd.numeraseregions &&
		     (instr->addr + instr->len) >= erase_regions[i].offset;
		     ++i) ;
		--i;
		/*
		 * check if the ending offset is aligned to this region's erase size
		 */
		if (i < 0 || ((instr->addr + instr->len) &
			      (erase_regions[i].erasesize - 1)))
			return -EINVAL;
	}

	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;

	/* make a local copy of instr to avoid modifying the caller's struct */
	erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);

	if (!erase)
		return -ENOMEM;

	*erase = *instr;
	length = instr->len;

	/*
	 * find the subdevice where the to-be-erased area begins, adjust
	 * starting offset to be relative to the subdevice start
	 */
	for (i = 0; i < concat->num_subdev; i++) {
		subdev = concat->subdev[i];
		if (subdev->size <= erase->addr) {
			erase->addr -= subdev->size;
			offset += subdev->size;
		} else {
			break;
		}
	}

	/* must never happen since size limit has been verified above */
	BUG_ON(i >= concat->num_subdev);

	/* now do the erase: */
	err = 0;
	for (; length > 0; i++) {
		/* loop for all subdevices affected by this request */
		subdev = concat->subdev[i];	/* get current subdevice */

		/* limit length to subdevice's size: */
		if (erase->addr + length > subdev->size)
			erase->len = subdev->size - erase->addr;
		else
			erase->len = length;

		if (!(subdev->flags & MTD_WRITEABLE)) {
			err = -EROFS;
			break;
		}
		length -= erase->len;
		if ((err = concat_dev_erase(subdev, erase))) {
			/* sanity check: should never happen since
			 * block alignment has been checked above */
			BUG_ON(err == -EINVAL);
			if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
				instr->fail_addr = erase->fail_addr + offset;
			break;
		}
		/*
		 * erase->addr specifies the offset of the area to be
		 * erased *within the current subdevice*. It can be
		 * non-zero only the first time through this loop, i.e.
		 * for the first subdevice where blocks need to be erased.
		 * All the following erases must begin at the start of the
		 * current subdevice, i.e. at offset zero.
		 */
		erase->addr = 0;
		offset += subdev->size;
	}
	instr->state = erase->state;
	kfree(erase);
	if (err)
		return err;

	if (instr->callback)
		instr->callback(instr);
	return 0;
}
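
/*
 * Worked example (editorial; sizes are made up): with two 8 MiB
 * subdevices and 128 KiB erase blocks, erasing 4 MiB at offset
 * 6 MiB is performed as two sub-erases: 2 MiB at offset 6 MiB of
 * subdev[0], then 2 MiB at offset 0 of subdev[1].  A failure
 * address reported by a subdevice is translated back into the
 * super device's address space via `offset`.
 */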
static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_lock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		uint64_t size;

		if (ofs >= subdev->size) {
			size = 0;
			ofs -= subdev->size;
			continue;
		}
		if (ofs + len > subdev->size)
			size = subdev->size - ofs;
		else
			size = len;

		err = mtd_unlock(subdev, ofs, size);
		if (err)
			break;

		len -= size;
		if (len == 0)
			break;

		err = -EINVAL;
		ofs = 0;
	}

	return err;
}
static void concat_sync(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_sync(subdev);
	}
}
static int concat_suspend(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, rc = 0;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		if ((rc = mtd_suspend(subdev)) < 0)
			return rc;
	}
	return rc;
}
static void concat_resume(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];
		mtd_resume(subdev);
	}
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}
/*
 * try to support NOMMU mmaps on concatenated devices
 * - we don't support subdev spanning as we can't guarantee it'll work
 */
static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
					      unsigned long len,
					      unsigned long offset,
					      unsigned long flags)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (offset >= subdev->size) {
			offset -= subdev->size;
			continue;
		}

		return mtd_get_unmapped_area(subdev, len, offset, flags);
	}

	return (unsigned long) -ENOSYS;
}
/*
 * This function constructs a virtual MTD device by concatenating
 * num_devs MTD devices. A pointer to the new device object is
 * returned upon success. This function does _not_
 * register any devices: this is the caller's responsibility.
 */
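
/*
 * Example usage (editorial sketch; "chip0" and "chip1" stand for MTDs
 * that a map driver has already probed):
 *
 *	struct mtd_info *parts[2] = { chip0, chip1 };
 *	struct mtd_info *merged;
 *
 *	merged = mtd_concat_create(parts, 2, "flash-concat");
 *	if (!merged)
 *		return -ENXIO;
 *	err = mtd_device_register(merged, NULL, 0);
 *
 * mtd_concat_destroy(merged) undoes the concatenation once the
 * device has been unregistered.
 */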
struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],	/* subdevices to concatenate */
				   int num_devs,	/* number of subdevices */
				   const char *name)
{				/* name for the new device */
	int i;
	size_t size;
	struct mtd_concat *concat;
	uint32_t max_erasesize, curr_erasesize;
	int num_erase_region;
	int max_writebufsize = 0;
	printk(KERN_NOTICE "Concatenating MTD devices:\n");
	for (i = 0; i < num_devs; i++)
		printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
	printk(KERN_NOTICE "into device \"%s\"\n", name);

	/* allocate the device structure */
	size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
	concat = kzalloc(size, GFP_KERNEL);
	if (!concat) {
		printk
		    ("memory allocation error while creating concatenated device \"%s\"\n",
		     name);
		return NULL;
	}
	concat->subdev = (struct mtd_info **) (concat + 1);
	/*
	 * Set up the new "super" device's MTD object structure, check for
	 * incompatibilities between the subdevices.
	 */
	concat->mtd.type = subdev[0]->type;
	concat->mtd.flags = subdev[0]->flags;
	concat->mtd.size = subdev[0]->size;
	concat->mtd.erasesize = subdev[0]->erasesize;
	concat->mtd.writesize = subdev[0]->writesize;

	for (i = 0; i < num_devs; i++)
		if (max_writebufsize < subdev[i]->writebufsize)
			max_writebufsize = subdev[i]->writebufsize;
	concat->mtd.writebufsize = max_writebufsize;

	concat->mtd.subpage_sft = subdev[0]->subpage_sft;
	concat->mtd.oobsize = subdev[0]->oobsize;
	concat->mtd.oobavail = subdev[0]->oobavail;
	if (subdev[0]->_writev)
		concat->mtd._writev = concat_writev;
	if (subdev[0]->_read_oob)
		concat->mtd._read_oob = concat_read_oob;
	if (subdev[0]->_write_oob)
		concat->mtd._write_oob = concat_write_oob;
	if (subdev[0]->_block_isbad)
		concat->mtd._block_isbad = concat_block_isbad;
	if (subdev[0]->_block_markbad)
		concat->mtd._block_markbad = concat_block_markbad;

	concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;

	concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;

	concat->subdev[0] = subdev[0];
	for (i = 1; i < num_devs; i++) {
		if (concat->mtd.type != subdev[i]->type) {
			kfree(concat);
			printk("Incompatible device type on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		if (concat->mtd.flags != subdev[i]->flags) {
			/*
			 * Expect all flags except MTD_WRITEABLE to be
			 * equal on all subdevices.
			 */
			if ((concat->mtd.flags ^ subdev[i]->
			     flags) & ~MTD_WRITEABLE) {
				kfree(concat);
				printk("Incompatible device flags on \"%s\"\n",
				       subdev[i]->name);
				return NULL;
			} else
				/* if writeable attribute differs,
				   make super device writeable */
				concat->mtd.flags |=
				    subdev[i]->flags & MTD_WRITEABLE;
		}

		/* only permit direct mapping if the BDIs are all the same
		 * - copy-mapping is still permitted
		 */
		if (concat->mtd.backing_dev_info !=
		    subdev[i]->backing_dev_info)
			concat->mtd.backing_dev_info =
				&default_backing_dev_info;

		concat->mtd.size += subdev[i]->size;
		concat->mtd.ecc_stats.badblocks +=
			subdev[i]->ecc_stats.badblocks;
		if (concat->mtd.writesize != subdev[i]->writesize ||
		    concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
		    concat->mtd.oobsize != subdev[i]->oobsize ||
		    !concat->mtd._read_oob != !subdev[i]->_read_oob ||
		    !concat->mtd._write_oob != !subdev[i]->_write_oob) {
			kfree(concat);
			printk("Incompatible OOB or ECC data on \"%s\"\n",
			       subdev[i]->name);
			return NULL;
		}
		concat->subdev[i] = subdev[i];
	}
	concat->mtd.ecclayout = subdev[0]->ecclayout;

	concat->num_subdev = num_devs;
	concat->mtd.name = name;

	concat->mtd._erase = concat_erase;
	concat->mtd._read = concat_read;
	concat->mtd._write = concat_write;
	concat->mtd._sync = concat_sync;
	concat->mtd._lock = concat_lock;
	concat->mtd._unlock = concat_unlock;
	concat->mtd._suspend = concat_suspend;
	concat->mtd._resume = concat_resume;
	concat->mtd._get_unmapped_area = concat_get_unmapped_area;
	/*
	 * Combine the erase block size info of the subdevices:
	 *
	 * first, walk the map of the new device and see how
	 * many changes in erase size we have
	 */
	max_erasesize = curr_erasesize = subdev[0]->erasesize;
	num_erase_region = 1;
	for (i = 0; i < num_devs; i++) {
		if (subdev[i]->numeraseregions == 0) {
			/* current subdevice has uniform erase size */
			if (subdev[i]->erasesize != curr_erasesize) {
				/* if it differs from the last subdevice's erase size, count it */
				++num_erase_region;
				curr_erasesize = subdev[i]->erasesize;
				if (curr_erasesize > max_erasesize)
					max_erasesize = curr_erasesize;
			}
		} else {
			/* current subdevice has variable erase size */
			int j;
			for (j = 0; j < subdev[i]->numeraseregions; j++) {

				/* walk the list of erase regions, count any changes */
				if (subdev[i]->eraseregions[j].erasesize !=
				    curr_erasesize) {
					++num_erase_region;
					curr_erasesize =
					    subdev[i]->eraseregions[j].
					    erasesize;
					if (curr_erasesize > max_erasesize)
						max_erasesize = curr_erasesize;
				}
			}
		}
	}
	if (num_erase_region == 1) {
		/*
		 * All subdevices have the same uniform erase size.
		 * This is easy:
		 */
		concat->mtd.erasesize = curr_erasesize;
		concat->mtd.numeraseregions = 0;
	} else {
		uint64_t tmp64;

		/*
		 * erase block size varies across the subdevices: allocate
		 * space to store the data describing the variable erase regions
		 */
		struct mtd_erase_region_info *erase_region_p;
		uint64_t begin, position;

		concat->mtd.erasesize = max_erasesize;
		concat->mtd.numeraseregions = num_erase_region;
		concat->mtd.eraseregions = erase_region_p =
		    kmalloc(num_erase_region *
			    sizeof (struct mtd_erase_region_info), GFP_KERNEL);
		if (!erase_region_p) {
			kfree(concat);
			printk
			    ("memory allocation error while creating erase region list"
			     " for device \"%s\"\n", name);
			return NULL;
		}
		/*
		 * walk the map of the new device once more and fill in
		 * erase region info:
		 */
		curr_erasesize = subdev[0]->erasesize;
		begin = position = 0;
		for (i = 0; i < num_devs; i++) {
			if (subdev[i]->numeraseregions == 0) {
				/* current subdevice has uniform erase size */
				if (subdev[i]->erasesize != curr_erasesize) {
					/*
					 * fill in an mtd_erase_region_info structure for the area
					 * we have walked so far:
					 */
					erase_region_p->offset = begin;
					erase_region_p->erasesize =
					    curr_erasesize;
					tmp64 = position - begin;
					do_div(tmp64, curr_erasesize);
					erase_region_p->numblocks = tmp64;
					begin = position;

					curr_erasesize = subdev[i]->erasesize;
					++erase_region_p;
				}
				position += subdev[i]->size;
			} else {
				/* current subdevice has variable erase size */
				int j;
				for (j = 0; j < subdev[i]->numeraseregions; j++) {
					/* walk the list of erase regions, count any changes */
					if (subdev[i]->eraseregions[j].
					    erasesize != curr_erasesize) {
						erase_region_p->offset = begin;
						erase_region_p->erasesize =
						    curr_erasesize;
						tmp64 = position - begin;
						do_div(tmp64, curr_erasesize);
						erase_region_p->numblocks = tmp64;
						begin = position;

						curr_erasesize =
						    subdev[i]->eraseregions[j].
						    erasesize;
						++erase_region_p;
					}
					position +=
					    subdev[i]->eraseregions[j].
					    numblocks * (uint64_t)curr_erasesize;
				}
			}
		}
		/* Now write the final entry */
		erase_region_p->offset = begin;
		erase_region_p->erasesize = curr_erasesize;
		tmp64 = position - begin;
		do_div(tmp64, curr_erasesize);
		erase_region_p->numblocks = tmp64;
	}

	return &concat->mtd;
}
/*
 * This function destroys an MTD object obtained from mtd_concat_create()
 */
void mtd_concat_destroy(struct mtd_info *mtd)
{
	struct mtd_concat *concat = CONCAT(mtd);
	if (concat->mtd.numeraseregions)
		kfree(concat->mtd.eraseregions);
	kfree(concat);
}
EXPORT_SYMBOL(mtd_concat_create);
EXPORT_SYMBOL(mtd_concat_destroy);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
MODULE_DESCRIPTION("Generic support for concatenating of MTD devices");