mtd: do not duplicate length and offset checks in drivers
firefly-linux-kernel-4.4.55.git: drivers/mtd/mtdconcat.c
1 /*
2  * MTD device concatenation layer
3  *
4  * Copyright © 2002 Robert Kaiser <rkaiser@sysgo.de>
5  * Copyright © 2002-2010 David Woodhouse <dwmw2@infradead.org>
6  *
7  * NAND support by Christian Gan <cgan@iders.ca>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
22  *
23  */
24
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
28 #include <linux/sched.h>
29 #include <linux/types.h>
30 #include <linux/backing-dev.h>
31
32 #include <linux/mtd/mtd.h>
33 #include <linux/mtd/concat.h>
34
35 #include <asm/div64.h>
36
37 /*
38  * Our storage structure:
39  * subdev points to an array of pointers to struct mtd_info objects;
40  * the array is allocated along with this structure.
41  *
42  */
43 struct mtd_concat {
44         struct mtd_info mtd;
45         int num_subdev;
46         struct mtd_info **subdev;
47 };
48
49 /*
50  * how to calculate the size required for the above structure,
51  * including the pointer array subdev points to:
52  */
53 #define SIZEOF_STRUCT_MTD_CONCAT(num_subdev)    \
54         ((sizeof(struct mtd_concat) + (num_subdev) * sizeof(struct mtd_info *)))
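/*
 * The pointer array itself lives in the same allocation, immediately
 * after the struct: mtd_concat_create() sets subdev to point just past
 * the structure (concat->subdev = (struct mtd_info **)(concat + 1)).
 */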
55
56 /*
57  * Given a pointer to the MTD object in the mtd_concat structure,
58  * we can retrieve the pointer to that structure with this macro.
59  */
60 #define CONCAT(x)  ((struct mtd_concat *)(x))
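/*
 * The cast is valid only because 'mtd' is the first member of
 * struct mtd_concat; container_of(x, struct mtd_concat, mtd) would
 * yield the same pointer without relying on member order.
 */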
61
62 /*
63  * MTD methods which look up the relevant subdevice, translate the
64  * effective address and pass through to the subdevice.
65  */
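/*
 * They all share the same walk over concat->subdev[]: whole subdevices
 * that lie before the requested offset are skipped by subtracting their
 * size, the chunk that fits in the current subdevice is issued, and any
 * remainder continues at offset 0 of the next subdevice.
 */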
66
67 static int
68 concat_read(struct mtd_info *mtd, loff_t from, size_t len,
69             size_t * retlen, u_char * buf)
70 {
71         struct mtd_concat *concat = CONCAT(mtd);
72         int ret = 0, err;
73         int i;
74
75         *retlen = 0;
76
77         for (i = 0; i < concat->num_subdev; i++) {
78                 struct mtd_info *subdev = concat->subdev[i];
79                 size_t size, retsize;
80
81                 if (from >= subdev->size) {
82                         /* Not destined for this subdev */
83                         size = 0;
84                         from -= subdev->size;
85                         continue;
86                 }
87                 if (from + len > subdev->size)
88                         /* First part goes into this subdev */
89                         size = subdev->size - from;
90                 else
91                         /* Entire transaction goes into this subdev */
92                         size = len;
93
94                 err = mtd_read(subdev, from, size, &retsize, buf);
95
96                 /* Save information about bitflips! */
97                 if (unlikely(err)) {
98                         if (mtd_is_eccerr(err)) {
99                                 mtd->ecc_stats.failed++;
100                                 ret = err;
101                         } else if (mtd_is_bitflip(err)) {
102                                 mtd->ecc_stats.corrected++;
103                                 /* Do not overwrite -EBADMSG !! */
104                                 if (!ret)
105                                         ret = err;
106                         } else
107                                 return err;
108                 }
109
110                 *retlen += retsize;
111                 len -= size;
112                 if (len == 0)
113                         return ret;
114
115                 buf += size;
116                 from = 0;
117         }
118         return -EINVAL;
119 }
120
121 static int
122 concat_write(struct mtd_info *mtd, loff_t to, size_t len,
123              size_t * retlen, const u_char * buf)
124 {
125         struct mtd_concat *concat = CONCAT(mtd);
126         int err = -EINVAL;
127         int i;
128
129         if (!(mtd->flags & MTD_WRITEABLE))
130                 return -EROFS;
131
132         *retlen = 0;
133
134         for (i = 0; i < concat->num_subdev; i++) {
135                 struct mtd_info *subdev = concat->subdev[i];
136                 size_t size, retsize;
137
138                 if (to >= subdev->size) {
139                         size = 0;
140                         to -= subdev->size;
141                         continue;
142                 }
143                 if (to + len > subdev->size)
144                         size = subdev->size - to;
145                 else
146                         size = len;
147
148                 if (!(subdev->flags & MTD_WRITEABLE))
149                         err = -EROFS;
150                 else
151                         err = mtd_write(subdev, to, size, &retsize, buf);
152
153                 if (err)
154                         break;
155
156                 *retlen += retsize;
157                 len -= size;
158                 if (len == 0)
159                         break;
160
161                 err = -EINVAL;
162                 buf += size;
163                 to = 0;
164         }
165         return err;
166 }
167
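/*
 * Vectored write: work on a scratch copy of the caller's kvec array so
 * that a vector which straddles a subdevice boundary can be split and
 * written in two pieces, one per subdevice.
 */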
168 static int
169 concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
170                 unsigned long count, loff_t to, size_t * retlen)
171 {
172         struct mtd_concat *concat = CONCAT(mtd);
173         struct kvec *vecs_copy;
174         unsigned long entry_low, entry_high;
175         size_t total_len = 0;
176         int i;
177         int err = -EINVAL;
178
179         if (!(mtd->flags & MTD_WRITEABLE))
180                 return -EROFS;
181
182         *retlen = 0;
183
184         /* Calculate total length of data */
185         for (i = 0; i < count; i++)
186                 total_len += vecs[i].iov_len;
187
188         /* Check alignment */
189         if (mtd->writesize > 1) {
190                 uint64_t __to = to;
191                 if (do_div(__to, mtd->writesize) || (total_len % mtd->writesize))
192                         return -EINVAL;
193         }
194
195         /* make a copy of vecs */
196         vecs_copy = kmemdup(vecs, sizeof(struct kvec) * count, GFP_KERNEL);
197         if (!vecs_copy)
198                 return -ENOMEM;
199
200         entry_low = 0;
201         for (i = 0; i < concat->num_subdev; i++) {
202                 struct mtd_info *subdev = concat->subdev[i];
203                 size_t size, wsize, retsize, old_iov_len;
204
205                 if (to >= subdev->size) {
206                         to -= subdev->size;
207                         continue;
208                 }
209
210                 size = min_t(uint64_t, total_len, subdev->size - to);
211                 wsize = size; /* store for future use */
212
213                 entry_high = entry_low;
214                 while (entry_high < count) {
215                         if (size <= vecs_copy[entry_high].iov_len)
216                                 break;
217                         size -= vecs_copy[entry_high++].iov_len;
218                 }
219
220                 old_iov_len = vecs_copy[entry_high].iov_len;
221                 vecs_copy[entry_high].iov_len = size;
222
223                 if (!(subdev->flags & MTD_WRITEABLE))
224                         err = -EROFS;
225                 else
226                         err = mtd_writev(subdev, &vecs_copy[entry_low],
227                                          entry_high - entry_low + 1, to,
228                                          &retsize);
229
230                 vecs_copy[entry_high].iov_len = old_iov_len - size;
231                 vecs_copy[entry_high].iov_base += size;
232
233                 entry_low = entry_high;
234
235                 if (err)
236                         break;
237
238                 *retlen += retsize;
239                 total_len -= wsize;
240
241                 if (total_len == 0)
242                         break;
243
244                 err = -EINVAL;
245                 to = 0;
246         }
247
248         kfree(vecs_copy);
249         return err;
250 }
251
252 static int
253 concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
254 {
255         struct mtd_concat *concat = CONCAT(mtd);
256         struct mtd_oob_ops devops = *ops;
257         int i, err, ret = 0;
258
259         ops->retlen = ops->oobretlen = 0;
260
261         for (i = 0; i < concat->num_subdev; i++) {
262                 struct mtd_info *subdev = concat->subdev[i];
263
264                 if (from >= subdev->size) {
265                         from -= subdev->size;
266                         continue;
267                 }
268
269                 /* partial read ? */
270                 if (from + devops.len > subdev->size)
271                         devops.len = subdev->size - from;
272
273                 err = mtd_read_oob(subdev, from, &devops);
274                 ops->retlen += devops.retlen;
275                 ops->oobretlen += devops.oobretlen;
276
277                 /* Save information about bitflips! */
278                 if (unlikely(err)) {
279                         if (mtd_is_eccerr(err)) {
280                                 mtd->ecc_stats.failed++;
281                                 ret = err;
282                         } else if (mtd_is_bitflip(err)) {
283                                 mtd->ecc_stats.corrected++;
284                                 /* Do not overwrite -EBADMSG !! */
285                                 if (!ret)
286                                         ret = err;
287                         } else
288                                 return err;
289                 }
290
291                 if (devops.datbuf) {
292                         devops.len = ops->len - ops->retlen;
293                         if (!devops.len)
294                                 return ret;
295                         devops.datbuf += devops.retlen;
296                 }
297                 if (devops.oobbuf) {
298                         devops.ooblen = ops->ooblen - ops->oobretlen;
299                         if (!devops.ooblen)
300                                 return ret;
301                         devops.oobbuf += devops.oobretlen;
302                 }
303
304                 from = 0;
305         }
306         return -EINVAL;
307 }
308
309 static int
310 concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
311 {
312         struct mtd_concat *concat = CONCAT(mtd);
313         struct mtd_oob_ops devops = *ops;
314         int i, err;
315
316         if (!(mtd->flags & MTD_WRITEABLE))
317                 return -EROFS;
318
319         ops->retlen = ops->oobretlen = 0;
320
321         for (i = 0; i < concat->num_subdev; i++) {
322                 struct mtd_info *subdev = concat->subdev[i];
323
324                 if (to >= subdev->size) {
325                         to -= subdev->size;
326                         continue;
327                 }
328
329                 /* partial write ? */
330                 if (to + devops.len > subdev->size)
331                         devops.len = subdev->size - to;
332
333                 err = mtd_write_oob(subdev, to, &devops);
334                 ops->retlen += devops.retlen;
                    ops->oobretlen += devops.oobretlen;
335                 if (err)
336                         return err;
337
338                 if (devops.datbuf) {
339                         devops.len = ops->len - ops->retlen;
340                         if (!devops.len)
341                                 return 0;
342                         devops.datbuf += devops.retlen;
343                 }
344                 if (devops.oobbuf) {
345                         devops.ooblen = ops->ooblen - ops->oobretlen;
346                         if (!devops.ooblen)
347                                 return 0;
348                         devops.oobbuf += devops.oobretlen;
349                 }
350                 to = 0;
351         }
352         return -EINVAL;
353 }
354
355 static void concat_erase_callback(struct erase_info *instr)
356 {
357         wake_up((wait_queue_head_t *) instr->priv);
358 }
359
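/*
 * Erase one subdevice synchronously: issue the erase and sleep until the
 * callback above reports MTD_ERASE_DONE or MTD_ERASE_FAILED.
 */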
360 static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
361 {
362         int err;
363         wait_queue_head_t waitq;
364         DECLARE_WAITQUEUE(wait, current);
365
366         /*
367          * This code was stol^H^H^H^Hinspired by mtdchar.c
368          */
369         init_waitqueue_head(&waitq);
370
371         erase->mtd = mtd;
372         erase->callback = concat_erase_callback;
373         erase->priv = (unsigned long) &waitq;
374
375         /*
376          * FIXME: Allow INTERRUPTIBLE. Which means
377          * not having the wait_queue head on the stack.
378          */
379         err = mtd_erase(mtd, erase);
380         if (!err) {
381                 set_current_state(TASK_UNINTERRUPTIBLE);
382                 add_wait_queue(&waitq, &wait);
383                 if (erase->state != MTD_ERASE_DONE
384                     && erase->state != MTD_ERASE_FAILED)
385                         schedule();
386                 remove_wait_queue(&waitq, &wait);
387                 set_current_state(TASK_RUNNING);
388
389                 err = (erase->state == MTD_ERASE_FAILED) ? -EIO : 0;
390         }
391         return err;
392 }
393
394 static int concat_erase(struct mtd_info *mtd, struct erase_info *instr)
395 {
396         struct mtd_concat *concat = CONCAT(mtd);
397         struct mtd_info *subdev;
398         int i, err;
399         uint64_t length, offset = 0;
400         struct erase_info *erase;
401
402         if (!(mtd->flags & MTD_WRITEABLE))
403                 return -EROFS;
404
405         /*
406          * Check for proper erase block alignment of the to-be-erased area.
407          * It is easier to do this based on the super device's erase
408          * region info rather than looking at each particular sub-device
409          * in turn.
410          */
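        /*
         * For instance, with a uniform 128KiB erasesize both instr->addr
         * and instr->len must be multiples of 128KiB; the masks below
         * reject anything else.
         */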
411         if (!concat->mtd.numeraseregions) {
412                 /* the easy case: device has uniform erase block size */
413                 if (instr->addr & (concat->mtd.erasesize - 1))
414                         return -EINVAL;
415                 if (instr->len & (concat->mtd.erasesize - 1))
416                         return -EINVAL;
417         } else {
418                 /* device has variable erase size */
419                 struct mtd_erase_region_info *erase_regions =
420                     concat->mtd.eraseregions;
421
422                 /*
423                  * Find the erase region where the to-be-erased area begins:
424                  */
425                 for (i = 0; i < concat->mtd.numeraseregions &&
426                      instr->addr >= erase_regions[i].offset; i++) ;
427                 --i;
428
429                 /*
430                  * Now erase_regions[i] is the region in which the
431                  * to-be-erased area begins. Verify that the starting
432                  * offset is aligned to this region's erase size:
433                  */
434                 if (i < 0 || instr->addr & (erase_regions[i].erasesize - 1))
435                         return -EINVAL;
436
437                 /*
438                  * now find the erase region where the to-be-erased area ends:
439                  */
440                 for (; i < concat->mtd.numeraseregions &&
441                      (instr->addr + instr->len) >= erase_regions[i].offset;
442                      ++i) ;
443                 --i;
444                 /*
445                  * check if the ending offset is aligned to this region's erase size
446                  */
447                 if (i < 0 || ((instr->addr + instr->len) &
448                                         (erase_regions[i].erasesize - 1)))
449                         return -EINVAL;
450         }
451
452         instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
453
454         /* make a local copy of instr to avoid modifying the caller's struct */
455         erase = kmalloc(sizeof (struct erase_info), GFP_KERNEL);
456
457         if (!erase)
458                 return -ENOMEM;
459
460         *erase = *instr;
461         length = instr->len;
462
463         /*
464          * find the subdevice where the to-be-erased area begins, adjust
465          * starting offset to be relative to the subdevice start
466          */
467         for (i = 0; i < concat->num_subdev; i++) {
468                 subdev = concat->subdev[i];
469                 if (subdev->size <= erase->addr) {
470                         erase->addr -= subdev->size;
471                         offset += subdev->size;
472                 } else {
473                         break;
474                 }
475         }
476
477         /* must never happen: the MTD core has already checked the range */
478         BUG_ON(i >= concat->num_subdev);
479
480         /* now do the erase: */
481         err = 0;
482         for (; length > 0; i++) {
483                 /* loop for all subdevices affected by this request */
484                 subdev = concat->subdev[i];     /* get current subdevice */
485
486                 /* limit length to subdevice's size: */
487                 if (erase->addr + length > subdev->size)
488                         erase->len = subdev->size - erase->addr;
489                 else
490                         erase->len = length;
491
492                 if (!(subdev->flags & MTD_WRITEABLE)) {
493                         err = -EROFS;
494                         break;
495                 }
496                 length -= erase->len;
497                 if ((err = concat_dev_erase(subdev, erase))) {
498                         /* sanity check: should never happen since
499                          * block alignment has been checked above */
500                         BUG_ON(err == -EINVAL);
501                         if (erase->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
502                                 instr->fail_addr = erase->fail_addr + offset;
503                         break;
504                 }
505                 /*
506                  * erase->addr specifies the offset of the area to be
507                  * erased *within the current subdevice*. It can be
508                  * non-zero only the first time through this loop, i.e.
509                  * for the first subdevice where blocks need to be erased.
510                  * All the following erases must begin at the start of the
511                  * current subdevice, i.e. at offset zero.
512                  */
513                 erase->addr = 0;
514                 offset += subdev->size;
515         }
516         instr->state = erase->state;
517         kfree(erase);
518         if (err)
519                 return err;
520
521         if (instr->callback)
522                 instr->callback(instr);
523         return 0;
524 }
525
526 static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
527 {
528         struct mtd_concat *concat = CONCAT(mtd);
529         int i, err = -EINVAL;
530
531         for (i = 0; i < concat->num_subdev; i++) {
532                 struct mtd_info *subdev = concat->subdev[i];
533                 uint64_t size;
534
535                 if (ofs >= subdev->size) {
536                         size = 0;
537                         ofs -= subdev->size;
538                         continue;
539                 }
540                 if (ofs + len > subdev->size)
541                         size = subdev->size - ofs;
542                 else
543                         size = len;
544
545                 err = mtd_lock(subdev, ofs, size);
546                 if (err)
547                         break;
548
549                 len -= size;
550                 if (len == 0)
551                         break;
552
553                 err = -EINVAL;
554                 ofs = 0;
555         }
556
557         return err;
558 }
559
560 static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
561 {
562         struct mtd_concat *concat = CONCAT(mtd);
563         int i, err = 0;
564
565         for (i = 0; i < concat->num_subdev; i++) {
566                 struct mtd_info *subdev = concat->subdev[i];
567                 uint64_t size;
568
569                 if (ofs >= subdev->size) {
570                         size = 0;
571                         ofs -= subdev->size;
572                         continue;
573                 }
574                 if (ofs + len > subdev->size)
575                         size = subdev->size - ofs;
576                 else
577                         size = len;
578
579                 err = mtd_unlock(subdev, ofs, size);
580                 if (err)
581                         break;
582
583                 len -= size;
584                 if (len == 0)
585                         break;
586
587                 err = -EINVAL;
588                 ofs = 0;
589         }
590
591         return err;
592 }
593
594 static void concat_sync(struct mtd_info *mtd)
595 {
596         struct mtd_concat *concat = CONCAT(mtd);
597         int i;
598
599         for (i = 0; i < concat->num_subdev; i++) {
600                 struct mtd_info *subdev = concat->subdev[i];
601                 mtd_sync(subdev);
602         }
603 }
604
605 static int concat_suspend(struct mtd_info *mtd)
606 {
607         struct mtd_concat *concat = CONCAT(mtd);
608         int i, rc = 0;
609
610         for (i = 0; i < concat->num_subdev; i++) {
611                 struct mtd_info *subdev = concat->subdev[i];
612                 if ((rc = mtd_suspend(subdev)) < 0)
613                         return rc;
614         }
615         return rc;
616 }
617
618 static void concat_resume(struct mtd_info *mtd)
619 {
620         struct mtd_concat *concat = CONCAT(mtd);
621         int i;
622
623         for (i = 0; i < concat->num_subdev; i++) {
624                 struct mtd_info *subdev = concat->subdev[i];
625                 mtd_resume(subdev);
626         }
627 }
628
629 static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
630 {
631         struct mtd_concat *concat = CONCAT(mtd);
632         int i, res = 0;
633
634         if (!mtd_can_have_bb(concat->subdev[0]))
635                 return res;
636
637         for (i = 0; i < concat->num_subdev; i++) {
638                 struct mtd_info *subdev = concat->subdev[i];
639
640                 if (ofs >= subdev->size) {
641                         ofs -= subdev->size;
642                         continue;
643                 }
644
645                 res = mtd_block_isbad(subdev, ofs);
646                 break;
647         }
648
649         return res;
650 }
651
652 static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
653 {
654         struct mtd_concat *concat = CONCAT(mtd);
655         int i, err = -EINVAL;
656
657         for (i = 0; i < concat->num_subdev; i++) {
658                 struct mtd_info *subdev = concat->subdev[i];
659
660                 if (ofs >= subdev->size) {
661                         ofs -= subdev->size;
662                         continue;
663                 }
664
665                 err = mtd_block_markbad(subdev, ofs);
666                 if (!err)
667                         mtd->ecc_stats.badblocks++;
668                 break;
669         }
670
671         return err;
672 }
673
674 /*
675  * try to support NOMMU mmaps on concatenated devices
676  * - we don't support subdev spanning as we can't guarantee it'll work
677  */
678 static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
679                                               unsigned long len,
680                                               unsigned long offset,
681                                               unsigned long flags)
682 {
683         struct mtd_concat *concat = CONCAT(mtd);
684         int i;
685
686         for (i = 0; i < concat->num_subdev; i++) {
687                 struct mtd_info *subdev = concat->subdev[i];
688
689                 if (offset >= subdev->size) {
690                         offset -= subdev->size;
691                         continue;
692                 }
693
694                 return mtd_get_unmapped_area(subdev, len, offset, flags);
695         }
696
697         return (unsigned long) -ENOSYS;
698 }
699
700 /*
701  * This function constructs a virtual MTD device by concatenating
702  * num_devs MTD devices. A pointer to the new device object is
703  * returned on success (NULL on failure). This function does _not_
704  * register any devices: this is the caller's responsibility.
705  */
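/*
 * A minimal usage sketch (hypothetical board code, assuming two chips
 * that were probed into an array named 'chips'):
 *
 *	struct mtd_info *combined;
 *
 *	combined = mtd_concat_create(chips, 2, "board-flash");
 *	if (combined)
 *		mtd_device_register(combined, NULL, 0);
 */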
706 struct mtd_info *mtd_concat_create(struct mtd_info *subdev[],   /* subdevices to concatenate */
707                                    int num_devs,        /* number of subdevices      */
708                                    const char *name)    /* name for the new device   */
709 {
710         int i;
711         size_t size;
712         struct mtd_concat *concat;
713         uint32_t max_erasesize, curr_erasesize;
714         int num_erase_region;
715         int max_writebufsize = 0;
716
717         printk(KERN_NOTICE "Concatenating MTD devices:\n");
718         for (i = 0; i < num_devs; i++)
719                 printk(KERN_NOTICE "(%d): \"%s\"\n", i, subdev[i]->name);
720         printk(KERN_NOTICE "into device \"%s\"\n", name);
721
722         /* allocate the device structure */
723         size = SIZEOF_STRUCT_MTD_CONCAT(num_devs);
724         concat = kzalloc(size, GFP_KERNEL);
725         if (!concat) {
726                 printk
727                     ("memory allocation error while creating concatenated device \"%s\"\n",
728                      name);
729                 return NULL;
730         }
731         concat->subdev = (struct mtd_info **) (concat + 1);
732
733         /*
734          * Set up the new "super" device's MTD object structure, check for
735          * incompatibilities between the subdevices.
736          */
737         concat->mtd.type = subdev[0]->type;
738         concat->mtd.flags = subdev[0]->flags;
739         concat->mtd.size = subdev[0]->size;
740         concat->mtd.erasesize = subdev[0]->erasesize;
741         concat->mtd.writesize = subdev[0]->writesize;
742
743         for (i = 0; i < num_devs; i++)
744                 if (max_writebufsize < subdev[i]->writebufsize)
745                         max_writebufsize = subdev[i]->writebufsize;
746         concat->mtd.writebufsize = max_writebufsize;
747
748         concat->mtd.subpage_sft = subdev[0]->subpage_sft;
749         concat->mtd.oobsize = subdev[0]->oobsize;
750         concat->mtd.oobavail = subdev[0]->oobavail;
751         if (subdev[0]->_writev)
752                 concat->mtd._writev = concat_writev;
753         if (subdev[0]->_read_oob)
754                 concat->mtd._read_oob = concat_read_oob;
755         if (subdev[0]->_write_oob)
756                 concat->mtd._write_oob = concat_write_oob;
757         if (subdev[0]->_block_isbad)
758                 concat->mtd._block_isbad = concat_block_isbad;
759         if (subdev[0]->_block_markbad)
760                 concat->mtd._block_markbad = concat_block_markbad;
761
762         concat->mtd.ecc_stats.badblocks = subdev[0]->ecc_stats.badblocks;
763
764         concat->mtd.backing_dev_info = subdev[0]->backing_dev_info;
765
766         concat->subdev[0] = subdev[0];
767
768         for (i = 1; i < num_devs; i++) {
769                 if (concat->mtd.type != subdev[i]->type) {
770                         kfree(concat);
771                         printk("Incompatible device type on \"%s\"\n",
772                                subdev[i]->name);
773                         return NULL;
774                 }
775                 if (concat->mtd.flags != subdev[i]->flags) {
776                         /*
777                          * Expect all flags except MTD_WRITEABLE to be
778                          * equal on all subdevices.
779                          */
780                         if ((concat->mtd.flags ^ subdev[i]->
781                              flags) & ~MTD_WRITEABLE) {
782                                 kfree(concat);
783                                 printk("Incompatible device flags on \"%s\"\n",
784                                        subdev[i]->name);
785                                 return NULL;
786                         } else
787                                 /* if writeable attribute differs,
788                                    make super device writeable */
789                                 concat->mtd.flags |=
790                                     subdev[i]->flags & MTD_WRITEABLE;
791                 }
792
793                 /* only permit direct mapping if the BDIs are all the same
794                  * - copy-mapping is still permitted
795                  */
796                 if (concat->mtd.backing_dev_info !=
797                     subdev[i]->backing_dev_info)
798                         concat->mtd.backing_dev_info =
799                                 &default_backing_dev_info;
800
801                 concat->mtd.size += subdev[i]->size;
802                 concat->mtd.ecc_stats.badblocks +=
803                         subdev[i]->ecc_stats.badblocks;
804                 if (concat->mtd.writesize   !=  subdev[i]->writesize ||
805                     concat->mtd.subpage_sft != subdev[i]->subpage_sft ||
806                     concat->mtd.oobsize    !=  subdev[i]->oobsize ||
807                     !concat->mtd._read_oob  != !subdev[i]->_read_oob ||
808                     !concat->mtd._write_oob != !subdev[i]->_write_oob) {
809                         kfree(concat);
810                         printk("Incompatible OOB or ECC data on \"%s\"\n",
811                                subdev[i]->name);
812                         return NULL;
813                 }
814                 concat->subdev[i] = subdev[i];
815
816         }
817
818         concat->mtd.ecclayout = subdev[0]->ecclayout;
819
820         concat->num_subdev = num_devs;
821         concat->mtd.name = name;
822
823         concat->mtd._erase = concat_erase;
824         concat->mtd._read = concat_read;
825         concat->mtd._write = concat_write;
826         concat->mtd._sync = concat_sync;
827         concat->mtd._lock = concat_lock;
828         concat->mtd._unlock = concat_unlock;
829         concat->mtd._suspend = concat_suspend;
830         concat->mtd._resume = concat_resume;
831         concat->mtd._get_unmapped_area = concat_get_unmapped_area;
832
833         /*
834          * Combine the erase block size info of the subdevices:
835          *
836          * first, walk the map of the new device and see how
837          * many changes in erase size we have
838          */
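        /*
         * For example, a 64KiB-erase chip followed by a 128KiB-erase chip
         * yields num_erase_region == 2 and max_erasesize == 128KiB.
         */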
839         max_erasesize = curr_erasesize = subdev[0]->erasesize;
840         num_erase_region = 1;
841         for (i = 0; i < num_devs; i++) {
842                 if (subdev[i]->numeraseregions == 0) {
843                         /* current subdevice has uniform erase size */
844                         if (subdev[i]->erasesize != curr_erasesize) {
845                                 /* if it differs from the last subdevice's erase size, count it */
846                                 ++num_erase_region;
847                                 curr_erasesize = subdev[i]->erasesize;
848                                 if (curr_erasesize > max_erasesize)
849                                         max_erasesize = curr_erasesize;
850                         }
851                 } else {
852                         /* current subdevice has variable erase size */
853                         int j;
854                         for (j = 0; j < subdev[i]->numeraseregions; j++) {
855
856                                 /* walk the list of erase regions, count any changes */
857                                 if (subdev[i]->eraseregions[j].erasesize !=
858                                     curr_erasesize) {
859                                         ++num_erase_region;
860                                         curr_erasesize =
861                                             subdev[i]->eraseregions[j].
862                                             erasesize;
863                                         if (curr_erasesize > max_erasesize)
864                                                 max_erasesize = curr_erasesize;
865                                 }
866                         }
867                 }
868         }
869
870         if (num_erase_region == 1) {
871                 /*
872                  * All subdevices have the same uniform erase size.
873                  * This is easy:
874                  */
875                 concat->mtd.erasesize = curr_erasesize;
876                 concat->mtd.numeraseregions = 0;
877         } else {
878                 uint64_t tmp64;
879
880                 /*
881                  * erase block size varies across the subdevices: allocate
882                  * space to store the data describing the variable erase regions
883                  */
884                 struct mtd_erase_region_info *erase_region_p;
885                 uint64_t begin, position;
886
887                 concat->mtd.erasesize = max_erasesize;
888                 concat->mtd.numeraseregions = num_erase_region;
889                 concat->mtd.eraseregions = erase_region_p =
890                     kmalloc(num_erase_region *
891                             sizeof (struct mtd_erase_region_info), GFP_KERNEL);
892                 if (!erase_region_p) {
893                         kfree(concat);
894                         printk
895                             ("memory allocation error while creating erase region list"
896                              " for device \"%s\"\n", name);
897                         return NULL;
898                 }
899
900                 /*
901                  * walk the map of the new device once more and fill in
902                  * the erase region info:
903                  */
904                 curr_erasesize = subdev[0]->erasesize;
905                 begin = position = 0;
906                 for (i = 0; i < num_devs; i++) {
907                         if (subdev[i]->numeraseregions == 0) {
908                                 /* current subdevice has uniform erase size */
909                                 if (subdev[i]->erasesize != curr_erasesize) {
910                                         /*
911                                          *  fill in an mtd_erase_region_info structure for the area
912                                          *  we have walked so far:
913                                          */
914                                         erase_region_p->offset = begin;
915                                         erase_region_p->erasesize =
916                                             curr_erasesize;
917                                         tmp64 = position - begin;
918                                         do_div(tmp64, curr_erasesize);
919                                         erase_region_p->numblocks = tmp64;
920                                         begin = position;
921
922                                         curr_erasesize = subdev[i]->erasesize;
923                                         ++erase_region_p;
924                                 }
925                                 position += subdev[i]->size;
926                         } else {
927                                 /* current subdevice has variable erase size */
928                                 int j;
929                                 for (j = 0; j < subdev[i]->numeraseregions; j++) {
930                                         /* walk the list of erase regions, fill in an entry at each change */
931                                         if (subdev[i]->eraseregions[j].
932                                             erasesize != curr_erasesize) {
933                                                 erase_region_p->offset = begin;
934                                                 erase_region_p->erasesize =
935                                                     curr_erasesize;
936                                                 tmp64 = position - begin;
937                                                 do_div(tmp64, curr_erasesize);
938                                                 erase_region_p->numblocks = tmp64;
939                                                 begin = position;
940
941                                                 curr_erasesize =
942                                                     subdev[i]->eraseregions[j].
943                                                     erasesize;
944                                                 ++erase_region_p;
945                                         }
946                                         position +=
947                                             subdev[i]->eraseregions[j].
948                                             numblocks * (uint64_t)curr_erasesize;
949                                 }
950                         }
951                 }
952                 /* Now write the final entry */
953                 erase_region_p->offset = begin;
954                 erase_region_p->erasesize = curr_erasesize;
955                 tmp64 = position - begin;
956                 do_div(tmp64, curr_erasesize);
957                 erase_region_p->numblocks = tmp64;
958         }
959
960         return &concat->mtd;
961 }
962
963 /*
964  * This function destroys an MTD object obtained from mtd_concat_create()
965  */
966
967 void mtd_concat_destroy(struct mtd_info *mtd)
968 {
969         struct mtd_concat *concat = CONCAT(mtd);
970         if (concat->mtd.numeraseregions)
971                 kfree(concat->mtd.eraseregions);
972         kfree(concat);
973 }
974
975 EXPORT_SYMBOL(mtd_concat_create);
976 EXPORT_SYMBOL(mtd_concat_destroy);
977
978 MODULE_LICENSE("GPL");
979 MODULE_AUTHOR("Robert Kaiser <rkaiser@sysgo.de>");
980 MODULE_DESCRIPTION("Generic support for concatenation of MTD devices");