1 /*
2  * raid10.c : Multiple Devices driver for Linux
3  *
4  * Copyright (C) 2000-2004 Neil Brown
5  *
6  * RAID-10 support for md.
7  *
8  * Based on code in raid1.c.  See raid1.c for further copyright information.
9  *
10  *
11  * This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2, or (at your option)
14  * any later version.
15  *
16  * You should have received a copy of the GNU General Public License
17  * (for example /usr/src/linux/COPYING); if not, write to the Free
18  * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/delay.h>
23 #include <linux/blkdev.h>
24 #include <linux/module.h>
25 #include <linux/seq_file.h>
26 #include <linux/ratelimit.h>
27 #include <linux/kthread.h>
28 #include "md.h"
29 #include "raid10.h"
30 #include "raid0.h"
31 #include "bitmap.h"
32
33 /*
34  * RAID10 provides a combination of RAID0 and RAID1 functionality.
35  * The layout of data is defined by
36  *    chunk_size
37  *    raid_disks
38  *    near_copies (stored in low byte of layout)
39  *    far_copies (stored in second byte of layout)
40  *    far_offset (stored in bit 16 of layout)
41  *
42  * The data to be stored is divided into chunks using chunksize.
43  * Each device is divided into far_copies sections.
44  * In each section, chunks are laid out in a style similar to raid0, but
45  * near_copies copies of each chunk are stored (each on a different drive).
46  * The starting device for each section is offset by near_copies from the starting
47  * device of the previous section.
48  * Thus there are (near_copies*far_copies) copies of each chunk, and each is on a different
49  * drive.
50  * near_copies and far_copies must be at least one, and their product is at most
51  * raid_disks.
52  *
53  * If far_offset is true, then the far_copies are handled a bit differently.
54  * The copies are still in different stripes, but instead of being very far apart
55  * on disk, they are in adjacent stripes.
56  */
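
/*
 * Purely illustrative example (not part of the original source): with
 * raid_disks=4, near_copies=2, far_copies=1 the data chunks A, B, C, D, ...
 * are laid out as
 *
 *           dev0  dev1  dev2  dev3
 *   row 0:   A     A     B     B
 *   row 1:   C     C     D     D
 *
 * With near_copies=1, far_copies=2 each device is split into two sections;
 * the first section is a plain raid0 layout (A B C D across the devices)
 * and the second section repeats it shifted near_copies devices to the
 * right (D A B C), so no chunk is ever stored twice on the same device.
 */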
57
58 /*
59  * Number of guaranteed r10bios in case of extreme VM load:
60  */
61 #define NR_RAID10_BIOS 256
62
63 /* when we get a read error on a read-only array, we redirect to another
64  * device without failing the first device, or trying to over-write to
65  * correct the read error.  To keep track of bad blocks on a per-bio
66  * level, we store IO_BLOCKED in the appropriate 'bios' pointer
67  */
68 #define IO_BLOCKED ((struct bio *)1)
69 /* When we successfully write to a known bad-block, we need to remove the
70  * bad-block marking which must be done from process context.  So we record
71  * the success by setting devs[n].bio to IO_MADE_GOOD
72  */
73 #define IO_MADE_GOOD ((struct bio *)2)
74
75 #define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
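
/* Illustrative note (not part of the original source): BIO_SPECIAL() is true
 * for NULL, IO_BLOCKED and IO_MADE_GOOD, i.e. for any 'bios' slot that does
 * not point at a real struct bio and so must never be passed to bio_put().
 */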
76
77 /* When there are this many requests queued to be written by
78  * the raid10 thread, we become 'congested' to provide back-pressure
79  * for writeback.
80  */
81 static int max_queued_requests = 1024;
82
83 static void allow_barrier(struct r10conf *conf);
84 static void lower_barrier(struct r10conf *conf);
85 static int enough(struct r10conf *conf, int ignore);
86 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
87                                 int *skipped);
88 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
89 static void end_reshape_write(struct bio *bio, int error);
90 static void end_reshape(struct r10conf *conf);
91
92 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
93 {
94         struct r10conf *conf = data;
95         int size = offsetof(struct r10bio, devs[conf->copies]);
96
97         /* allocate an r10bio with room for conf->copies entries in the
98          * devs array */
99         return kzalloc(size, gfp_flags);
100 }
101
102 static void r10bio_pool_free(void *r10_bio, void *data)
103 {
104         kfree(r10_bio);
105 }
106
107 /* Maximum size of each resync request */
108 #define RESYNC_BLOCK_SIZE (64*1024)
109 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
110 /* amount of memory to reserve for resync requests */
111 #define RESYNC_WINDOW (1024*1024)
112 /* maximum number of concurrent requests, memory permitting */
113 #define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
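
/*
 * Worked numbers (illustrative only, assuming 4KiB pages): RESYNC_BLOCK_SIZE
 * is 64KiB, so RESYNC_PAGES = (65536 + 4095) / 4096 = 16 pages per request,
 * and RESYNC_DEPTH = 32MiB / 64KiB = 512 concurrent resync requests at most.
 */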
114
115 /*
116  * When performing a resync, we need to read and compare, so
117  * we need as many pages as there are copies.
118  * When performing a recovery, we need 2 bios, one for read,
119  * one for write (we recover only one drive per r10buf)
120  *
121  */
122 static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
123 {
124         struct r10conf *conf = data;
125         struct page *page;
126         struct r10bio *r10_bio;
127         struct bio *bio;
128         int i, j;
129         int nalloc;
130
131         r10_bio = r10bio_pool_alloc(gfp_flags, conf);
132         if (!r10_bio)
133                 return NULL;
134
135         if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
136             test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
137                 nalloc = conf->copies; /* resync */
138         else
139                 nalloc = 2; /* recovery */
140
141         /*
142          * Allocate bios.
143          */
144         for (j = nalloc ; j-- ; ) {
145                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
146                 if (!bio)
147                         goto out_free_bio;
148                 r10_bio->devs[j].bio = bio;
149                 if (!conf->have_replacement)
150                         continue;
151                 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
152                 if (!bio)
153                         goto out_free_bio;
154                 r10_bio->devs[j].repl_bio = bio;
155         }
156         /*
157          * Allocate RESYNC_PAGES data pages and attach them
158          * where needed.
159          */
160         for (j = 0 ; j < nalloc; j++) {
161                 struct bio *rbio = r10_bio->devs[j].repl_bio;
162                 bio = r10_bio->devs[j].bio;
163                 for (i = 0; i < RESYNC_PAGES; i++) {
164                         if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
165                                                &conf->mddev->recovery)) {
166                                 /* we can share bv_page's during recovery
167                                  * and reshape */
168                                 struct bio *rbio = r10_bio->devs[0].bio;
169                                 page = rbio->bi_io_vec[i].bv_page;
170                                 get_page(page);
171                         } else
172                                 page = alloc_page(gfp_flags);
173                         if (unlikely(!page))
174                                 goto out_free_pages;
175
176                         bio->bi_io_vec[i].bv_page = page;
177                         if (rbio)
178                                 rbio->bi_io_vec[i].bv_page = page;
179                 }
180         }
181
182         return r10_bio;
183
184 out_free_pages:
185         for ( ; i > 0 ; i--)
186                 safe_put_page(bio->bi_io_vec[i-1].bv_page);
187         while (j--)
188                 for (i = 0; i < RESYNC_PAGES ; i++)
189                         safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
190         j = 0;
191 out_free_bio:
192         for ( ; j < nalloc; j++) {
193                 if (r10_bio->devs[j].bio)
194                         bio_put(r10_bio->devs[j].bio);
195                 if (r10_bio->devs[j].repl_bio)
196                         bio_put(r10_bio->devs[j].repl_bio);
197         }
198         r10bio_pool_free(r10_bio, conf);
199         return NULL;
200 }
201
202 static void r10buf_pool_free(void *__r10_bio, void *data)
203 {
204         int i;
205         struct r10conf *conf = data;
206         struct r10bio *r10bio = __r10_bio;
207         int j;
208
209         for (j=0; j < conf->copies; j++) {
210                 struct bio *bio = r10bio->devs[j].bio;
211                 if (bio) {
212                         for (i = 0; i < RESYNC_PAGES; i++) {
213                                 safe_put_page(bio->bi_io_vec[i].bv_page);
214                                 bio->bi_io_vec[i].bv_page = NULL;
215                         }
216                         bio_put(bio);
217                 }
218                 bio = r10bio->devs[j].repl_bio;
219                 if (bio)
220                         bio_put(bio);
221         }
222         r10bio_pool_free(r10bio, conf);
223 }
224
225 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
226 {
227         int i;
228
229         for (i = 0; i < conf->copies; i++) {
230                 struct bio **bio = & r10_bio->devs[i].bio;
231                 if (!BIO_SPECIAL(*bio))
232                         bio_put(*bio);
233                 *bio = NULL;
234                 bio = &r10_bio->devs[i].repl_bio;
235                 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
236                         bio_put(*bio);
237                 *bio = NULL;
238         }
239 }
240
241 static void free_r10bio(struct r10bio *r10_bio)
242 {
243         struct r10conf *conf = r10_bio->mddev->private;
244
245         put_all_bios(conf, r10_bio);
246         mempool_free(r10_bio, conf->r10bio_pool);
247 }
248
249 static void put_buf(struct r10bio *r10_bio)
250 {
251         struct r10conf *conf = r10_bio->mddev->private;
252
253         mempool_free(r10_bio, conf->r10buf_pool);
254
255         lower_barrier(conf);
256 }
257
258 static void reschedule_retry(struct r10bio *r10_bio)
259 {
260         unsigned long flags;
261         struct mddev *mddev = r10_bio->mddev;
262         struct r10conf *conf = mddev->private;
263
264         spin_lock_irqsave(&conf->device_lock, flags);
265         list_add(&r10_bio->retry_list, &conf->retry_list);
266         conf->nr_queued ++;
267         spin_unlock_irqrestore(&conf->device_lock, flags);
268
269         /* wake up frozen array... */
270         wake_up(&conf->wait_barrier);
271
272         md_wakeup_thread(mddev->thread);
273 }
274
275 /*
276  * raid_end_bio_io() is called when we have finished servicing a mirrored
277  * operation and are ready to return a success/failure code to the buffer
278  * cache layer.
279  */
280 static void raid_end_bio_io(struct r10bio *r10_bio)
281 {
282         struct bio *bio = r10_bio->master_bio;
283         int done;
284         struct r10conf *conf = r10_bio->mddev->private;
285
286         if (bio->bi_phys_segments) {
287                 unsigned long flags;
288                 spin_lock_irqsave(&conf->device_lock, flags);
289                 bio->bi_phys_segments--;
290                 done = (bio->bi_phys_segments == 0);
291                 spin_unlock_irqrestore(&conf->device_lock, flags);
292         } else
293                 done = 1;
294         if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
295                 clear_bit(BIO_UPTODATE, &bio->bi_flags);
296         if (done) {
297                 bio_endio(bio, 0);
298                 /*
299                  * Wake up any possible resync thread that waits for the device
300                  * to go idle.
301                  */
302                 allow_barrier(conf);
303         }
304         free_r10bio(r10_bio);
305 }
306
307 /*
308  * Update disk head position estimator based on IRQ completion info.
309  */
310 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
311 {
312         struct r10conf *conf = r10_bio->mddev->private;
313
314         conf->mirrors[r10_bio->devs[slot].devnum].head_position =
315                 r10_bio->devs[slot].addr + (r10_bio->sectors);
316 }
317
318 /*
319  * Find the disk number which triggered the given bio
320  */
321 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
322                          struct bio *bio, int *slotp, int *replp)
323 {
324         int slot;
325         int repl = 0;
326
327         for (slot = 0; slot < conf->copies; slot++) {
328                 if (r10_bio->devs[slot].bio == bio)
329                         break;
330                 if (r10_bio->devs[slot].repl_bio == bio) {
331                         repl = 1;
332                         break;
333                 }
334         }
335
336         BUG_ON(slot == conf->copies);
337         update_head_pos(slot, r10_bio);
338
339         if (slotp)
340                 *slotp = slot;
341         if (replp)
342                 *replp = repl;
343         return r10_bio->devs[slot].devnum;
344 }
345
346 static void raid10_end_read_request(struct bio *bio, int error)
347 {
348         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
349         struct r10bio *r10_bio = bio->bi_private;
350         int slot, dev;
351         struct md_rdev *rdev;
352         struct r10conf *conf = r10_bio->mddev->private;
353
354
355         slot = r10_bio->read_slot;
356         dev = r10_bio->devs[slot].devnum;
357         rdev = r10_bio->devs[slot].rdev;
358         /*
359          * this branch is our 'one mirror IO has finished' event handler:
360          */
361         update_head_pos(slot, r10_bio);
362
363         if (uptodate) {
364                 /*
365                  * Set R10BIO_Uptodate in our master bio, so that
366                  * we will return a good error code to the higher
367                  * levels even if IO on some other mirrored buffer fails.
368                  *
369                  * The 'master' represents the composite IO operation to
370                  * user-side. So if something waits for IO, then it will
371                  * wait for the 'master' bio.
372                  */
373                 set_bit(R10BIO_Uptodate, &r10_bio->state);
374         } else {
375                 /* If all other devices that store this block have
376                  * failed, we want to return the error upwards rather
377                  * than fail the last device.  Here we redefine
378                  * "uptodate" to mean "Don't want to retry"
379                  */
380                 unsigned long flags;
381                 spin_lock_irqsave(&conf->device_lock, flags);
382                 if (!enough(conf, rdev->raid_disk))
383                         uptodate = 1;
384                 spin_unlock_irqrestore(&conf->device_lock, flags);
385         }
386         if (uptodate) {
387                 raid_end_bio_io(r10_bio);
388                 rdev_dec_pending(rdev, conf->mddev);
389         } else {
390                 /*
391                  * oops, read error - keep the refcount on the rdev
392                  */
393                 char b[BDEVNAME_SIZE];
394                 printk_ratelimited(KERN_ERR
395                                    "md/raid10:%s: %s: rescheduling sector %llu\n",
396                                    mdname(conf->mddev),
397                                    bdevname(rdev->bdev, b),
398                                    (unsigned long long)r10_bio->sector);
399                 set_bit(R10BIO_ReadError, &r10_bio->state);
400                 reschedule_retry(r10_bio);
401         }
402 }
403
404 static void close_write(struct r10bio *r10_bio)
405 {
406         /* clear the bitmap if all writes complete successfully */
407         bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
408                         r10_bio->sectors,
409                         !test_bit(R10BIO_Degraded, &r10_bio->state),
410                         0);
411         md_write_end(r10_bio->mddev);
412 }
413
414 static void one_write_done(struct r10bio *r10_bio)
415 {
416         if (atomic_dec_and_test(&r10_bio->remaining)) {
417                 if (test_bit(R10BIO_WriteError, &r10_bio->state))
418                         reschedule_retry(r10_bio);
419                 else {
420                         close_write(r10_bio);
421                         if (test_bit(R10BIO_MadeGood, &r10_bio->state))
422                                 reschedule_retry(r10_bio);
423                         else
424                                 raid_end_bio_io(r10_bio);
425                 }
426         }
427 }
428
429 static void raid10_end_write_request(struct bio *bio, int error)
430 {
431         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
432         struct r10bio *r10_bio = bio->bi_private;
433         int dev;
434         int dec_rdev = 1;
435         struct r10conf *conf = r10_bio->mddev->private;
436         int slot, repl;
437         struct md_rdev *rdev = NULL;
438
439         dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
440
441         if (repl)
442                 rdev = conf->mirrors[dev].replacement;
443         if (!rdev) {
444                 smp_rmb();
445                 repl = 0;
446                 rdev = conf->mirrors[dev].rdev;
447         }
448         /*
449          * this branch is our 'one mirror IO has finished' event handler:
450          */
451         if (!uptodate) {
452                 if (repl)
453                         /* Never record new bad blocks to replacement,
454                          * just fail it.
455                          */
456                         md_error(rdev->mddev, rdev);
457                 else {
458                         set_bit(WriteErrorSeen, &rdev->flags);
459                         if (!test_and_set_bit(WantReplacement, &rdev->flags))
460                                 set_bit(MD_RECOVERY_NEEDED,
461                                         &rdev->mddev->recovery);
462                         set_bit(R10BIO_WriteError, &r10_bio->state);
463                         dec_rdev = 0;
464                 }
465         } else {
466                 /*
467                  * Set R10BIO_Uptodate in our master bio, so that
468          * we will return a good error code to the higher
469                  * levels even if IO on some other mirrored buffer fails.
470                  *
471                  * The 'master' represents the composite IO operation to
472                  * user-side. So if something waits for IO, then it will
473                  * wait for the 'master' bio.
474                  */
475                 sector_t first_bad;
476                 int bad_sectors;
477
478                 set_bit(R10BIO_Uptodate, &r10_bio->state);
479
480                 /* Maybe we can clear some bad blocks. */
481                 if (is_badblock(rdev,
482                                 r10_bio->devs[slot].addr,
483                                 r10_bio->sectors,
484                                 &first_bad, &bad_sectors)) {
485                         bio_put(bio);
486                         if (repl)
487                                 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
488                         else
489                                 r10_bio->devs[slot].bio = IO_MADE_GOOD;
490                         dec_rdev = 0;
491                         set_bit(R10BIO_MadeGood, &r10_bio->state);
492                 }
493         }
494
495         /*
496          *
497          * Let's see if all mirrored write operations have finished
498          * already.
499          */
500         one_write_done(r10_bio);
501         if (dec_rdev)
502                 rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
503 }
504
505 /*
506  * RAID10 layout manager
507  * As well as the chunksize and raid_disks count, there are two
508  * parameters: near_copies and far_copies.
509  * near_copies * far_copies must be <= raid_disks.
510  * Normally one of these will be 1.
511  * If both are 1, we get raid0.
512  * If near_copies == raid_disks, we get raid1.
513  *
514  * Chunks are laid out in raid0 style with near_copies copies of the
515  * first chunk, followed by near_copies copies of the next chunk and
516  * so on.
517  * If far_copies > 1, then after 1/far_copies of the array has been assigned
518  * as described above, we start again with a device offset of near_copies.
519  * So we effectively have another copy of the whole array further down all
520  * the drives, but with blocks on different drives.
521  * With this layout, a block is never stored twice on the same device.
522  *
523  * raid10_find_phys finds the sector offset of a given virtual sector
524  * on each device that it is on.
525  *
526  * raid10_find_virt does the reverse mapping, from a device and a
527  * sector offset to a virtual address
528  */
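
/*
 * Illustrative round-trip (not part of the original source): for a
 * near_copies=2, raid_disks=4, far_copies=1 layout with 64KiB chunks
 * (128 sectors), virtual sector 300 falls in logical chunk 2 at offset 44.
 * __raid10_find_phys() maps it to device chunk 1, i.e. device sector 172,
 * on devices 0 and 1, and raid10_find_virt(conf, 172, 0) maps that back to
 * virtual sector 300.
 */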
529
530 static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
531 {
532         int n,f;
533         sector_t sector;
534         sector_t chunk;
535         sector_t stripe;
536         int dev;
537         int slot = 0;
538
539         /* now calculate first sector/dev */
540         chunk = r10bio->sector >> geo->chunk_shift;
541         sector = r10bio->sector & geo->chunk_mask;
542
543         chunk *= geo->near_copies;
544         stripe = chunk;
545         dev = sector_div(stripe, geo->raid_disks);
546         if (geo->far_offset)
547                 stripe *= geo->far_copies;
548
549         sector += stripe << geo->chunk_shift;
550
551         /* and calculate all the others */
552         for (n = 0; n < geo->near_copies; n++) {
553                 int d = dev;
554                 sector_t s = sector;
555                 r10bio->devs[slot].addr = sector;
556                 r10bio->devs[slot].devnum = d;
557                 slot++;
558
559                 for (f = 1; f < geo->far_copies; f++) {
560                         d += geo->near_copies;
561                         if (d >= geo->raid_disks)
562                                 d -= geo->raid_disks;
563                         s += geo->stride;
564                         r10bio->devs[slot].devnum = d;
565                         r10bio->devs[slot].addr = s;
566                         slot++;
567                 }
568                 dev++;
569                 if (dev >= geo->raid_disks) {
570                         dev = 0;
571                         sector += (geo->chunk_mask + 1);
572                 }
573         }
574 }
575
576 static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
577 {
578         struct geom *geo = &conf->geo;
579
580         if (conf->reshape_progress != MaxSector &&
581             ((r10bio->sector >= conf->reshape_progress) !=
582              conf->mddev->reshape_backwards)) {
583                 set_bit(R10BIO_Previous, &r10bio->state);
584                 geo = &conf->prev;
585         } else
586                 clear_bit(R10BIO_Previous, &r10bio->state);
587
588         __raid10_find_phys(geo, r10bio);
589 }
590
591 static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
592 {
593         sector_t offset, chunk, vchunk;
594         /* Never use conf->prev as this is only called during resync
595          * or recovery, so reshape isn't happening
596          */
597         struct geom *geo = &conf->geo;
598
599         offset = sector & geo->chunk_mask;
600         if (geo->far_offset) {
601                 int fc;
602                 chunk = sector >> geo->chunk_shift;
603                 fc = sector_div(chunk, geo->far_copies);
604                 dev -= fc * geo->near_copies;
605                 if (dev < 0)
606                         dev += geo->raid_disks;
607         } else {
608                 while (sector >= geo->stride) {
609                         sector -= geo->stride;
610                         if (dev < geo->near_copies)
611                                 dev += geo->raid_disks - geo->near_copies;
612                         else
613                                 dev -= geo->near_copies;
614                 }
615                 chunk = sector >> geo->chunk_shift;
616         }
617         vchunk = chunk * geo->raid_disks + dev;
618         sector_div(vchunk, geo->near_copies);
619         return (vchunk << geo->chunk_shift) + offset;
620 }
621
622 /**
623  *      raid10_mergeable_bvec -- tell the bio layer if two requests can be merged
624  *      @q: request queue
625  *      @bvm: properties of new bio
626  *      @biovec: the request that could be merged to it.
627  *
628  *      Return the number of bytes we can accept at this offset
629  *      This requires checking for end-of-chunk if near_copies != raid_disks,
630  *      and for subordinate merge_bvec_fns if merge_check_needed.
631  */
632 static int raid10_mergeable_bvec(struct request_queue *q,
633                                  struct bvec_merge_data *bvm,
634                                  struct bio_vec *biovec)
635 {
636         struct mddev *mddev = q->queuedata;
637         struct r10conf *conf = mddev->private;
638         sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
639         int max;
640         unsigned int chunk_sectors;
641         unsigned int bio_sectors = bvm->bi_size >> 9;
642         struct geom *geo = &conf->geo;
643
644         chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
645         if (conf->reshape_progress != MaxSector &&
646             ((sector >= conf->reshape_progress) !=
647              conf->mddev->reshape_backwards))
648                 geo = &conf->prev;
649
650         if (geo->near_copies < geo->raid_disks) {
651                 max = (chunk_sectors - ((sector & (chunk_sectors - 1))
652                                         + bio_sectors)) << 9;
653                 if (max < 0)
654                         /* bio_add cannot handle a negative return */
655                         max = 0;
656                 if (max <= biovec->bv_len && bio_sectors == 0)
657                         return biovec->bv_len;
658         } else
659                 max = biovec->bv_len;
660
661         if (mddev->merge_check_needed) {
662                 struct {
663                         struct r10bio r10_bio;
664                         struct r10dev devs[conf->copies];
665                 } on_stack;
666                 struct r10bio *r10_bio = &on_stack.r10_bio;
667                 int s;
668                 if (conf->reshape_progress != MaxSector) {
669                         /* Cannot give any guidance during reshape */
670                         if (max <= biovec->bv_len && bio_sectors == 0)
671                                 return biovec->bv_len;
672                         return 0;
673                 }
674                 r10_bio->sector = sector;
675                 raid10_find_phys(conf, r10_bio);
676                 rcu_read_lock();
677                 for (s = 0; s < conf->copies; s++) {
678                         int disk = r10_bio->devs[s].devnum;
679                         struct md_rdev *rdev = rcu_dereference(
680                                 conf->mirrors[disk].rdev);
681                         if (rdev && !test_bit(Faulty, &rdev->flags)) {
682                                 struct request_queue *q =
683                                         bdev_get_queue(rdev->bdev);
684                                 if (q->merge_bvec_fn) {
685                                         bvm->bi_sector = r10_bio->devs[s].addr
686                                                 + rdev->data_offset;
687                                         bvm->bi_bdev = rdev->bdev;
688                                         max = min(max, q->merge_bvec_fn(
689                                                           q, bvm, biovec));
690                                 }
691                         }
692                         rdev = rcu_dereference(conf->mirrors[disk].replacement);
693                         if (rdev && !test_bit(Faulty, &rdev->flags)) {
694                                 struct request_queue *q =
695                                         bdev_get_queue(rdev->bdev);
696                                 if (q->merge_bvec_fn) {
697                                         bvm->bi_sector = r10_bio->devs[s].addr
698                                                 + rdev->data_offset;
699                                         bvm->bi_bdev = rdev->bdev;
700                                         max = min(max, q->merge_bvec_fn(
701                                                           q, bvm, biovec));
702                                 }
703                         }
704                 }
705                 rcu_read_unlock();
706         }
707         return max;
708 }
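
/*
 * Illustrative arithmetic (not part of the original source) for the chunk
 * boundary check above: with 64KiB chunks (chunk_sectors = 128), a new bio
 * starting at sector 120 with bio_sectors == 0 may accept at most
 * (128 - (120 & 127)) << 9 = 8 sectors = 4096 bytes before it would cross
 * into the next chunk.
 */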
709
710 /*
711  * This routine returns the disk from which the requested read should
712  * be done. There is a per-array 'next expected sequential IO' sector
713  * number - if this matches on the next IO then we use the last disk.
714  * There is also a per-disk 'last known head position' sector that is
715  * maintained from IRQ contexts; both the normal and the resync IO
716  * completion handlers update this position correctly. If there is no
717  * perfect sequential match then we pick the disk whose head is closest.
718  *
719  * If there are 2 mirrors in the same 2 devices, performance degrades
720  * because the head position is tracked per mirror, not per device.
721  *
722  * The rdev for the device selected will have nr_pending incremented.
723  */
724
725 /*
726  * FIXME: possibly should rethink read balancing and do it differently
727  * depending on near_copies / far_copies geometry.
728  */
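
/*
 * Illustrative example (not part of the original source): for a 'near'
 * layout the balancing below first takes an idle disk (nr_pending == 0),
 * otherwise the disk whose last known head position is closest to the
 * request; e.g. with copies at device sector 1000 on disks whose
 * head_position is 900 and 5000, the distances are 100 and 4000, so the
 * first disk is chosen.  For far_copies > 1 the copy with the lowest
 * device address is always used instead.
 */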
729 static struct md_rdev *read_balance(struct r10conf *conf,
730                                     struct r10bio *r10_bio,
731                                     int *max_sectors)
732 {
733         const sector_t this_sector = r10_bio->sector;
734         int disk, slot;
735         int sectors = r10_bio->sectors;
736         int best_good_sectors;
737         sector_t new_distance, best_dist;
738         struct md_rdev *best_rdev, *rdev = NULL;
739         int do_balance;
740         int best_slot;
741         struct geom *geo = &conf->geo;
742
743         raid10_find_phys(conf, r10_bio);
744         rcu_read_lock();
745 retry:
746         sectors = r10_bio->sectors;
747         best_slot = -1;
748         best_rdev = NULL;
749         best_dist = MaxSector;
750         best_good_sectors = 0;
751         do_balance = 1;
752         /*
753          * Check if we can balance. We can balance on the whole
754          * device if no resync is going on (recovery is ok), or below
755          * the resync window. We take the first readable disk when
756          * above the resync window.
757          */
758         if (conf->mddev->recovery_cp < MaxSector
759             && (this_sector + sectors >= conf->next_resync))
760                 do_balance = 0;
761
762         for (slot = 0; slot < conf->copies ; slot++) {
763                 sector_t first_bad;
764                 int bad_sectors;
765                 sector_t dev_sector;
766
767                 if (r10_bio->devs[slot].bio == IO_BLOCKED)
768                         continue;
769                 disk = r10_bio->devs[slot].devnum;
770                 rdev = rcu_dereference(conf->mirrors[disk].replacement);
771                 if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
772                     test_bit(Unmerged, &rdev->flags) ||
773                     r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
774                         rdev = rcu_dereference(conf->mirrors[disk].rdev);
775                 if (rdev == NULL ||
776                     test_bit(Faulty, &rdev->flags) ||
777                     test_bit(Unmerged, &rdev->flags))
778                         continue;
779                 if (!test_bit(In_sync, &rdev->flags) &&
780                     r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
781                         continue;
782
783                 dev_sector = r10_bio->devs[slot].addr;
784                 if (is_badblock(rdev, dev_sector, sectors,
785                                 &first_bad, &bad_sectors)) {
786                         if (best_dist < MaxSector)
787                                 /* Already have a better slot */
788                                 continue;
789                         if (first_bad <= dev_sector) {
790                                 /* Cannot read here.  If this is the
791                                  * 'primary' device, then we must not read
792                                  * beyond 'bad_sectors' from another device.
793                                  */
794                                 bad_sectors -= (dev_sector - first_bad);
795                                 if (!do_balance && sectors > bad_sectors)
796                                         sectors = bad_sectors;
797                                 if (best_good_sectors > sectors)
798                                         best_good_sectors = sectors;
799                         } else {
800                                 sector_t good_sectors =
801                                         first_bad - dev_sector;
802                                 if (good_sectors > best_good_sectors) {
803                                         best_good_sectors = good_sectors;
804                                         best_slot = slot;
805                                         best_rdev = rdev;
806                                 }
807                                 if (!do_balance)
808                                         /* Must read from here */
809                                         break;
810                         }
811                         continue;
812                 } else
813                         best_good_sectors = sectors;
814
815                 if (!do_balance)
816                         break;
817
818                 /* This optimisation is debatable, and completely destroys
819                  * sequential read speed for 'far copies' arrays.  So only
820                  * keep it for 'near' arrays, and review those later.
821                  */
822                 if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
823                         break;
824
825                 /* for far > 1 always use the lowest address */
826                 if (geo->far_copies > 1)
827                         new_distance = r10_bio->devs[slot].addr;
828                 else
829                         new_distance = abs(r10_bio->devs[slot].addr -
830                                            conf->mirrors[disk].head_position);
831                 if (new_distance < best_dist) {
832                         best_dist = new_distance;
833                         best_slot = slot;
834                         best_rdev = rdev;
835                 }
836         }
837         if (slot >= conf->copies) {
838                 slot = best_slot;
839                 rdev = best_rdev;
840         }
841
842         if (slot >= 0) {
843                 atomic_inc(&rdev->nr_pending);
844                 if (test_bit(Faulty, &rdev->flags)) {
845                         /* Cannot risk returning a device that failed
846                          * before we inc'ed nr_pending
847                          */
848                         rdev_dec_pending(rdev, conf->mddev);
849                         goto retry;
850                 }
851                 r10_bio->read_slot = slot;
852         } else
853                 rdev = NULL;
854         rcu_read_unlock();
855         *max_sectors = best_good_sectors;
856
857         return rdev;
858 }
859
860 int md_raid10_congested(struct mddev *mddev, int bits)
861 {
862         struct r10conf *conf = mddev->private;
863         int i, ret = 0;
864
865         if ((bits & (1 << BDI_async_congested)) &&
866             conf->pending_count >= max_queued_requests)
867                 return 1;
868
869         rcu_read_lock();
870         for (i = 0;
871              (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
872                      && ret == 0;
873              i++) {
874                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
875                 if (rdev && !test_bit(Faulty, &rdev->flags)) {
876                         struct request_queue *q = bdev_get_queue(rdev->bdev);
877
878                         ret |= bdi_congested(&q->backing_dev_info, bits);
879                 }
880         }
881         rcu_read_unlock();
882         return ret;
883 }
884 EXPORT_SYMBOL_GPL(md_raid10_congested);
885
886 static int raid10_congested(void *data, int bits)
887 {
888         struct mddev *mddev = data;
889
890         return mddev_congested(mddev, bits) ||
891                 md_raid10_congested(mddev, bits);
892 }
893
894 static void flush_pending_writes(struct r10conf *conf)
895 {
896         /* Any writes that have been queued but are awaiting
897          * bitmap updates get flushed here.
898          */
899         spin_lock_irq(&conf->device_lock);
900
901         if (conf->pending_bio_list.head) {
902                 struct bio *bio;
903                 bio = bio_list_get(&conf->pending_bio_list);
904                 conf->pending_count = 0;
905                 spin_unlock_irq(&conf->device_lock);
906                 /* flush any pending bitmap writes to disk
907                  * before proceeding w/ I/O */
908                 bitmap_unplug(conf->mddev->bitmap);
909                 wake_up(&conf->wait_barrier);
910
911                 while (bio) { /* submit pending writes */
912                         struct bio *next = bio->bi_next;
913                         bio->bi_next = NULL;
914                         generic_make_request(bio);
915                         bio = next;
916                 }
917         } else
918                 spin_unlock_irq(&conf->device_lock);
919 }
920
921 /* Barriers....
922  * Sometimes we need to suspend IO while we do something else,
923  * either some resync/recovery, or reconfigure the array.
924  * To do this we raise a 'barrier'.
925  * The 'barrier' is a counter that can be raised multiple times
926  * to count how many activities are happening which preclude
927  * normal IO.
928  * We can only raise the barrier if there is no pending IO.
929  * i.e. if nr_pending == 0.
930  * We choose only to raise the barrier if no-one is waiting for the
931  * barrier to go down.  This means that as soon as an IO request
932  * is ready, no other operations which require a barrier will start
933  * until the IO request has had a chance.
934  *
935  * So: regular IO calls 'wait_barrier'.  When that returns there
936  *    is no background IO happening.  It must arrange to call
937  *    allow_barrier when it has finished its IO.
938  * background IO calls must call raise_barrier.  Once that returns
939  *    there is no normal IO happening.  It must arrange to call
940  *    lower_barrier when the particular background IO completes.
941  */
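
/*
 * Usage sketch (illustrative, not part of the original source):
 *
 *   regular IO path                    resync/recovery path
 *   ---------------                    --------------------
 *   wait_barrier(conf);                raise_barrier(conf, force);
 *   ... submit normal IO ...           ... do background IO ...
 *   allow_barrier(conf);               lower_barrier(conf);
 */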
942
943 static void raise_barrier(struct r10conf *conf, int force)
944 {
945         BUG_ON(force && !conf->barrier);
946         spin_lock_irq(&conf->resync_lock);
947
948         /* Wait until no block IO is waiting (unless 'force') */
949         wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
950                             conf->resync_lock, );
951
952         /* block any new IO from starting */
953         conf->barrier++;
954
955         /* Now wait for all pending IO to complete */
956         wait_event_lock_irq(conf->wait_barrier,
957                             !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
958                             conf->resync_lock, );
959
960         spin_unlock_irq(&conf->resync_lock);
961 }
962
963 static void lower_barrier(struct r10conf *conf)
964 {
965         unsigned long flags;
966         spin_lock_irqsave(&conf->resync_lock, flags);
967         conf->barrier--;
968         spin_unlock_irqrestore(&conf->resync_lock, flags);
969         wake_up(&conf->wait_barrier);
970 }
971
972 static void wait_barrier(struct r10conf *conf)
973 {
974         spin_lock_irq(&conf->resync_lock);
975         if (conf->barrier) {
976                 conf->nr_waiting++;
977                 /* Wait for the barrier to drop.
978                  * However if there are already pending
979                  * requests (preventing the barrier from
980                  * rising completely), and the
981                  * pre-process bio queue isn't empty,
982                  * then don't wait, as we need to empty
983                  * that queue to get the nr_pending
984                  * count down.
985                  */
986                 wait_event_lock_irq(conf->wait_barrier,
987                                     !conf->barrier ||
988                                     (conf->nr_pending &&
989                                      current->bio_list &&
990                                      !bio_list_empty(current->bio_list)),
991                                     conf->resync_lock,
992                         );
993                 conf->nr_waiting--;
994         }
995         conf->nr_pending++;
996         spin_unlock_irq(&conf->resync_lock);
997 }
998
999 static void allow_barrier(struct r10conf *conf)
1000 {
1001         unsigned long flags;
1002         spin_lock_irqsave(&conf->resync_lock, flags);
1003         conf->nr_pending--;
1004         spin_unlock_irqrestore(&conf->resync_lock, flags);
1005         wake_up(&conf->wait_barrier);
1006 }
1007
1008 static void freeze_array(struct r10conf *conf)
1009 {
1010         /* stop sync IO and normal IO and wait for everything to
1011          * go quiet.
1012          * We increment barrier and nr_waiting, and then
1013          * wait until nr_pending matches nr_queued+1
1014          * This is called in the context of one normal IO request
1015          * that has failed. Thus any sync request that might be pending
1016          * will be blocked by nr_pending, and we need to wait for
1017          * pending IO requests to complete or be queued for re-try.
1018          * Thus the number queued (nr_queued) plus this request (1)
1019          * must match the number of pending IOs (nr_pending) before
1020          * we continue.
1021          */
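        /* Illustrative example (not part of the original source): if 5
         * requests are pending and freeze_array() is called from one of
         * them, we wait until the other 4 have either completed or been
         * put on the retry list, at which point nr_pending == nr_queued + 1
         * and the array is quiet.
         */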
1022         spin_lock_irq(&conf->resync_lock);
1023         conf->barrier++;
1024         conf->nr_waiting++;
1025         wait_event_lock_irq(conf->wait_barrier,
1026                             conf->nr_pending == conf->nr_queued+1,
1027                             conf->resync_lock,
1028                             flush_pending_writes(conf));
1029
1030         spin_unlock_irq(&conf->resync_lock);
1031 }
1032
1033 static void unfreeze_array(struct r10conf *conf)
1034 {
1035         /* reverse the effect of the freeze */
1036         spin_lock_irq(&conf->resync_lock);
1037         conf->barrier--;
1038         conf->nr_waiting--;
1039         wake_up(&conf->wait_barrier);
1040         spin_unlock_irq(&conf->resync_lock);
1041 }
1042
1043 static sector_t choose_data_offset(struct r10bio *r10_bio,
1044                                    struct md_rdev *rdev)
1045 {
1046         if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
1047             test_bit(R10BIO_Previous, &r10_bio->state))
1048                 return rdev->data_offset;
1049         else
1050                 return rdev->new_data_offset;
1051 }
1052
1053 static void make_request(struct mddev *mddev, struct bio * bio)
1054 {
1055         struct r10conf *conf = mddev->private;
1056         struct r10bio *r10_bio;
1057         struct bio *read_bio;
1058         int i;
1059         sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1060         int chunk_sects = chunk_mask + 1;
1061         const int rw = bio_data_dir(bio);
1062         const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1063         const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
1064         unsigned long flags;
1065         struct md_rdev *blocked_rdev;
1066         int sectors_handled;
1067         int max_sectors;
1068         int sectors;
1069
1070         if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1071                 md_flush_request(mddev, bio);
1072                 return;
1073         }
1074
1075         /* If this request crosses a chunk boundary, we need to
1076          * split it.  This will only happen for 1 PAGE (or less) requests.
1077          */
1078         if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
1079                      > chunk_sects
1080                      && (conf->geo.near_copies < conf->geo.raid_disks
1081                          || conf->prev.near_copies < conf->prev.raid_disks))) {
1082                 struct bio_pair *bp;
1083                 /* Sanity check -- queue functions should prevent this happening */
1084                 if (bio->bi_vcnt != 1 ||
1085                     bio->bi_idx != 0)
1086                         goto bad_map;
1087                 /* This is a one page bio that upper layers
1088                  * refuse to split for us, so we need to split it.
1089                  */
1090                 bp = bio_split(bio,
1091                                chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
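                /* Illustrative example (not part of the original source):
                 * with chunk_sects = 128, a one-page bio starting at sector
                 * 250 is split after 128 - (250 & 127) = 6 sectors, so bio1
                 * covers sectors 250-255 and bio2 starts at the chunk
                 * boundary (sector 256).
                 */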
1092
1093                 /* Each of these 'make_request' calls will call 'wait_barrier'.
1094                  * If the first succeeds but the second blocks due to the resync
1095                  * thread raising the barrier, we will deadlock because the
1096                  * IO to the underlying device will be queued in generic_make_request
1097                  * and will never complete, so will never reduce nr_pending.
1098                  * So increment nr_waiting here so no new raise_barriers will
1099                  * succeed, and so the second wait_barrier cannot block.
1100                  */
1101                 spin_lock_irq(&conf->resync_lock);
1102                 conf->nr_waiting++;
1103                 spin_unlock_irq(&conf->resync_lock);
1104
1105                 make_request(mddev, &bp->bio1);
1106                 make_request(mddev, &bp->bio2);
1107
1108                 spin_lock_irq(&conf->resync_lock);
1109                 conf->nr_waiting--;
1110                 wake_up(&conf->wait_barrier);
1111                 spin_unlock_irq(&conf->resync_lock);
1112
1113                 bio_pair_release(bp);
1114                 return;
1115         bad_map:
1116                 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1117                        " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1118                        (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
1119
1120                 bio_io_error(bio);
1121                 return;
1122         }
1123
1124         md_write_start(mddev, bio);
1125
1126         /*
1127          * Register the new request and wait if the reconstruction
1128          * thread has put up a bar for new requests.
1129          * Continue immediately if no resync is active currently.
1130          */
1131         wait_barrier(conf);
1132
1133         sectors = bio->bi_size >> 9;
1134         while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1135             bio->bi_sector < conf->reshape_progress &&
1136             bio->bi_sector + sectors > conf->reshape_progress) {
1137                 /* IO spans the reshape position.  Need to wait for
1138                  * reshape to pass
1139                  */
1140                 allow_barrier(conf);
1141                 wait_event(conf->wait_barrier,
1142                            conf->reshape_progress <= bio->bi_sector ||
1143                            conf->reshape_progress >= bio->bi_sector + sectors);
1144                 wait_barrier(conf);
1145         }
1146         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1147             bio_data_dir(bio) == WRITE &&
1148             (mddev->reshape_backwards
1149              ? (bio->bi_sector < conf->reshape_safe &&
1150                 bio->bi_sector + sectors > conf->reshape_progress)
1151              : (bio->bi_sector + sectors > conf->reshape_safe &&
1152                 bio->bi_sector < conf->reshape_progress))) {
1153                 /* Need to update reshape_position in metadata */
1154                 mddev->reshape_position = conf->reshape_progress;
1155                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1156                 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1157                 md_wakeup_thread(mddev->thread);
1158                 wait_event(mddev->sb_wait,
1159                            !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1160
1161                 conf->reshape_safe = mddev->reshape_position;
1162         }
1163
1164         r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1165
1166         r10_bio->master_bio = bio;
1167         r10_bio->sectors = sectors;
1168
1169         r10_bio->mddev = mddev;
1170         r10_bio->sector = bio->bi_sector;
1171         r10_bio->state = 0;
1172
1173         /* We might need to issue multiple reads to different
1174          * devices if there are bad blocks around, so we keep
1175          * track of the number of reads in bio->bi_phys_segments.
1176          * If this is 0, there is only one r10_bio and no locking
1177          * will be needed when the request completes.  If it is
1178          * non-zero, then it is the number of not-completed requests.
1179          */
1180         bio->bi_phys_segments = 0;
1181         clear_bit(BIO_SEG_VALID, &bio->bi_flags);
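        /* Illustrative example (not part of the original source): if a
         * 256-sector read can only be satisfied for 100 sectors by the
         * chosen device (e.g. because of a bad block), bi_phys_segments is
         * set to 2 and a second r10_bio is allocated for the remaining 156
         * sectors, starting after the sectors already handled.
         */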
1182
1183         if (rw == READ) {
1184                 /*
1185                  * read balancing logic:
1186                  */
1187                 struct md_rdev *rdev;
1188                 int slot;
1189
1190 read_again:
1191                 rdev = read_balance(conf, r10_bio, &max_sectors);
1192                 if (!rdev) {
1193                         raid_end_bio_io(r10_bio);
1194                         return;
1195                 }
1196                 slot = r10_bio->read_slot;
1197
1198                 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1199                 md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
1200                             max_sectors);
1201
1202                 r10_bio->devs[slot].bio = read_bio;
1203                 r10_bio->devs[slot].rdev = rdev;
1204
1205                 read_bio->bi_sector = r10_bio->devs[slot].addr +
1206                         choose_data_offset(r10_bio, rdev);
1207                 read_bio->bi_bdev = rdev->bdev;
1208                 read_bio->bi_end_io = raid10_end_read_request;
1209                 read_bio->bi_rw = READ | do_sync;
1210                 read_bio->bi_private = r10_bio;
1211
1212                 if (max_sectors < r10_bio->sectors) {
1213                         /* Could not read all from this device, so we will
1214                          * need another r10_bio.
1215                          */
1216                         sectors_handled = (r10_bio->sectors + max_sectors
1217                                            - bio->bi_sector);
1218                         r10_bio->sectors = max_sectors;
1219                         spin_lock_irq(&conf->device_lock);
1220                         if (bio->bi_phys_segments == 0)
1221                                 bio->bi_phys_segments = 2;
1222                         else
1223                                 bio->bi_phys_segments++;
1224                         spin_unlock_irq(&conf->device_lock);
1225                         /* Cannot call generic_make_request directly
1226                          * as that will be queued in __generic_make_request
1227                          * and subsequent mempool_alloc might block
1228                          * waiting for it.  So hand the bio over to raid10d.
1229                          */
1230                         reschedule_retry(r10_bio);
1231
1232                         r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1233
1234                         r10_bio->master_bio = bio;
1235                         r10_bio->sectors = ((bio->bi_size >> 9)
1236                                             - sectors_handled);
1237                         r10_bio->state = 0;
1238                         r10_bio->mddev = mddev;
1239                         r10_bio->sector = bio->bi_sector + sectors_handled;
1240                         goto read_again;
1241                 } else
1242                         generic_make_request(read_bio);
1243                 return;
1244         }
1245
1246         /*
1247          * WRITE:
1248          */
1249         if (conf->pending_count >= max_queued_requests) {
1250                 md_wakeup_thread(mddev->thread);
1251                 wait_event(conf->wait_barrier,
1252                            conf->pending_count < max_queued_requests);
1253         }
1254         /* first select target devices under rcu_lock and
1255          * inc refcount on their rdev.  Record them by setting
1256          * bios[x] to bio
1257          * If there are known/acknowledged bad blocks on any device
1258          * on which we have seen a write error, we want to avoid
1259          * writing to those blocks.  This potentially requires several
1260          * writes to write around the bad blocks.  Each set of writes
1261          * gets its own r10_bio with a set of bios attached.  The number
1262          * of r10_bios is recorded in bio->bi_phys_segments just as with
1263          * the read case.
1264          */
1265
1266         r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1267         raid10_find_phys(conf, r10_bio);
1268 retry_write:
1269         blocked_rdev = NULL;
1270         rcu_read_lock();
1271         max_sectors = r10_bio->sectors;
1272
1273         for (i = 0;  i < conf->copies; i++) {
1274                 int d = r10_bio->devs[i].devnum;
1275                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
1276                 struct md_rdev *rrdev = rcu_dereference(
1277                         conf->mirrors[d].replacement);
1278                 if (rdev == rrdev)
1279                         rrdev = NULL;
1280                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1281                         atomic_inc(&rdev->nr_pending);
1282                         blocked_rdev = rdev;
1283                         break;
1284                 }
1285                 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1286                         atomic_inc(&rrdev->nr_pending);
1287                         blocked_rdev = rrdev;
1288                         break;
1289                 }
1290                 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1291                               || test_bit(Unmerged, &rrdev->flags)))
1292                         rrdev = NULL;
1293
1294                 r10_bio->devs[i].bio = NULL;
1295                 r10_bio->devs[i].repl_bio = NULL;
1296                 if (!rdev || test_bit(Faulty, &rdev->flags) ||
1297                     test_bit(Unmerged, &rdev->flags)) {
1298                         set_bit(R10BIO_Degraded, &r10_bio->state);
1299                         continue;
1300                 }
1301                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1302                         sector_t first_bad;
1303                         sector_t dev_sector = r10_bio->devs[i].addr;
1304                         int bad_sectors;
1305                         int is_bad;
1306
1307                         is_bad = is_badblock(rdev, dev_sector,
1308                                              max_sectors,
1309                                              &first_bad, &bad_sectors);
1310                         if (is_bad < 0) {
1311                                 /* Mustn't write here until the bad block
1312                                  * is acknowledged
1313                                  */
1314                                 atomic_inc(&rdev->nr_pending);
1315                                 set_bit(BlockedBadBlocks, &rdev->flags);
1316                                 blocked_rdev = rdev;
1317                                 break;
1318                         }
1319                         if (is_bad && first_bad <= dev_sector) {
1320                                 /* Cannot write here at all */
1321                                 bad_sectors -= (dev_sector - first_bad);
1322                                 if (bad_sectors < max_sectors)
1323                                         /* Mustn't write more than bad_sectors
1324                                          * to other devices yet
1325                                          */
1326                                         max_sectors = bad_sectors;
1327                                 /* We don't set R10BIO_Degraded as that
1328                                  * only applies if the disk is missing,
1329                                  * so it might be re-added, and we want to
1330                                  * know to recover this chunk.
1331                                  * In this case the device is here, and the
1332                                  * fact that this chunk is not in-sync is
1333                                  * recorded in the bad block log.
1334                                  */
1335                                 continue;
1336                         }
1337                         if (is_bad) {
1338                                 int good_sectors = first_bad - dev_sector;
1339                                 if (good_sectors < max_sectors)
1340                                         max_sectors = good_sectors;
1341                         }
1342                 }
1343                 r10_bio->devs[i].bio = bio;
1344                 atomic_inc(&rdev->nr_pending);
1345                 if (rrdev) {
1346                         r10_bio->devs[i].repl_bio = bio;
1347                         atomic_inc(&rrdev->nr_pending);
1348                 }
1349         }
1350         rcu_read_unlock();
1351
1352         if (unlikely(blocked_rdev)) {
1353                 /* Have to wait for this device to get unblocked, then retry */
1354                 int j;
1355                 int d;
1356
1357                 for (j = 0; j < i; j++) {
1358                         if (r10_bio->devs[j].bio) {
1359                                 d = r10_bio->devs[j].devnum;
1360                                 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1361                         }
1362                         if (r10_bio->devs[j].repl_bio) {
1363                                 struct md_rdev *rdev;
1364                                 d = r10_bio->devs[j].devnum;
1365                                 rdev = conf->mirrors[d].replacement;
1366                                 if (!rdev) {
1367                                         /* Race with remove_disk */
1368                                         smp_mb();
1369                                         rdev = conf->mirrors[d].rdev;
1370                                 }
1371                                 rdev_dec_pending(rdev, mddev);
1372                         }
1373                 }
1374                 allow_barrier(conf);
1375                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1376                 wait_barrier(conf);
1377                 goto retry_write;
1378         }
1379
1380         if (max_sectors < r10_bio->sectors) {
1381                 /* We are splitting this into multiple parts, so
1382                  * we need to prepare for allocating another r10_bio.
1383                  */
1384                 r10_bio->sectors = max_sectors;
1385                 spin_lock_irq(&conf->device_lock);
1386                 if (bio->bi_phys_segments == 0)
1387                         bio->bi_phys_segments = 2;
1388                 else
1389                         bio->bi_phys_segments++;
1390                 spin_unlock_irq(&conf->device_lock);
1391         }
1392         sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
1393
1394         atomic_set(&r10_bio->remaining, 1);
1395         bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
1396
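        /* Clone the master bio once per data copy (and once more for each
         * active replacement), then queue the clones on pending_bio_list
         * for the raid10 thread to submit later.
         */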
1397         for (i = 0; i < conf->copies; i++) {
1398                 struct bio *mbio;
1399                 int d = r10_bio->devs[i].devnum;
1400                 if (!r10_bio->devs[i].bio)
1401                         continue;
1402
1403                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1404                 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1405                             max_sectors);
1406                 r10_bio->devs[i].bio = mbio;
1407
1408                 mbio->bi_sector = (r10_bio->devs[i].addr+
1409                                    choose_data_offset(r10_bio,
1410                                                       conf->mirrors[d].rdev));
1411                 mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1412                 mbio->bi_end_io = raid10_end_write_request;
1413                 mbio->bi_rw = WRITE | do_sync | do_fua;
1414                 mbio->bi_private = r10_bio;
1415
1416                 atomic_inc(&r10_bio->remaining);
1417                 spin_lock_irqsave(&conf->device_lock, flags);
1418                 bio_list_add(&conf->pending_bio_list, mbio);
1419                 conf->pending_count++;
1420                 spin_unlock_irqrestore(&conf->device_lock, flags);
1421                 if (!mddev_check_plugged(mddev))
1422                         md_wakeup_thread(mddev->thread);
1423
1424                 if (!r10_bio->devs[i].repl_bio)
1425                         continue;
1426
1427                 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1428                 md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
1429                             max_sectors);
1430                 r10_bio->devs[i].repl_bio = mbio;
1431
1432                 /* We are actively writing to the original device
1433                  * so it cannot disappear, so the replacement cannot
1434                  * become NULL here
1435                  */
1436                 mbio->bi_sector = (r10_bio->devs[i].addr +
1437                                    choose_data_offset(
1438                                            r10_bio,
1439                                            conf->mirrors[d].replacement));
1440                 mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
1441                 mbio->bi_end_io = raid10_end_write_request;
1442                 mbio->bi_rw = WRITE | do_sync | do_fua;
1443                 mbio->bi_private = r10_bio;
1444
1445                 atomic_inc(&r10_bio->remaining);
1446                 spin_lock_irqsave(&conf->device_lock, flags);
1447                 bio_list_add(&conf->pending_bio_list, mbio);
1448                 conf->pending_count++;
1449                 spin_unlock_irqrestore(&conf->device_lock, flags);
1450                 if (!mddev_check_plugged(mddev))
1451                         md_wakeup_thread(mddev->thread);
1452         }
1453
1454         /* Don't remove the bias on 'remaining' (one_write_done) until
1455          * after checking if we need to go around again.
1456          */
1457
1458         if (sectors_handled < (bio->bi_size >> 9)) {
1459                 one_write_done(r10_bio);
1460                 /* We need another r10_bio.  It has already been counted
1461                  * in bio->bi_phys_segments.
1462                  */
1463                 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1464
1465                 r10_bio->master_bio = bio;
1466                 r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
1467
1468                 r10_bio->mddev = mddev;
1469                 r10_bio->sector = bio->bi_sector + sectors_handled;
1470                 r10_bio->state = 0;
1471                 goto retry_write;
1472         }
1473         one_write_done(r10_bio);
1474
1475         /* In case raid10d snuck in to freeze_array */
1476         wake_up(&conf->wait_barrier);
1477 }
1478
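/* Report the array geometry and per-device state for /proc/mdstat:
 * "U" for an in-sync device, "_" for a missing or failed one.
 */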
1479 static void status(struct seq_file *seq, struct mddev *mddev)
1480 {
1481         struct r10conf *conf = mddev->private;
1482         int i;
1483
1484         if (conf->geo.near_copies < conf->geo.raid_disks)
1485                 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
1486         if (conf->geo.near_copies > 1)
1487                 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1488         if (conf->geo.far_copies > 1) {
1489                 if (conf->geo.far_offset)
1490                         seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
1491                 else
1492                         seq_printf(seq, " %d far-copies", conf->geo.far_copies);
1493         }
1494         seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1495                                         conf->geo.raid_disks - mddev->degraded);
1496         for (i = 0; i < conf->geo.raid_disks; i++)
1497                 seq_printf(seq, "%s",
1498                               conf->mirrors[i].rdev &&
1499                               test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
1500         seq_printf(seq, "]");
1501 }
1502
1503 /* check if there are enough drives for
1504  * every block to appear on at least one.
1505  * Don't consider the device numbered 'ignore'
1506  * as we might be about to remove it.
1507  */
1508 static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
1509 {
1510         int first = 0;
1511
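        /* Scan the disks in windows of 'copies' consecutive slots; if any
         * window has no working member other than 'ignore', report failure.
         */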
1512         do {
1513                 int n = conf->copies;
1514                 int cnt = 0;
1515                 while (n--) {
1516                         if (conf->mirrors[first].rdev &&
1517                             first != ignore)
1518                                 cnt++;
1519                         first = (first+1) % geo->raid_disks;
1520                 }
1521                 if (cnt == 0)
1522                         return 0;
1523         } while (first != 0);
1524         return 1;
1525 }
1526
1527 static int enough(struct r10conf *conf, int ignore)
1528 {
1529         return _enough(conf, &conf->geo, ignore) &&
1530                 _enough(conf, &conf->prev, ignore);
1531 }
1532
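/* A device has reported an error.  If it holds the last good copy of
 * some data, leave it alone and let the IO error reach the caller;
 * otherwise mark it Faulty and account the array as degraded.
 */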
1533 static void error(struct mddev *mddev, struct md_rdev *rdev)
1534 {
1535         char b[BDEVNAME_SIZE];
1536         struct r10conf *conf = mddev->private;
1537
1538         /*
1539          * If it is not operational, then we have already marked it as dead
1540          * else if it is the last working disk, ignore the error and let
1541          * the next level up know.
1542          * else mark the drive as failed
1543          */
1544         if (test_bit(In_sync, &rdev->flags)
1545             && !enough(conf, rdev->raid_disk))
1546                 /*
1547                  * Don't fail the drive, just return an IO error.
1548                  */
1549                 return;
1550         if (test_and_clear_bit(In_sync, &rdev->flags)) {
1551                 unsigned long flags;
1552                 spin_lock_irqsave(&conf->device_lock, flags);
1553                 mddev->degraded++;
1554                 spin_unlock_irqrestore(&conf->device_lock, flags);
1555                 /*
1556                  * if recovery is running, make sure it aborts.
1557                  */
1558                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1559         }
1560         set_bit(Blocked, &rdev->flags);
1561         set_bit(Faulty, &rdev->flags);
1562         set_bit(MD_CHANGE_DEVS, &mddev->flags);
1563         printk(KERN_ALERT
1564                "md/raid10:%s: Disk failure on %s, disabling device.\n"
1565                "md/raid10:%s: Operation continuing on %d devices.\n",
1566                mdname(mddev), bdevname(rdev->bdev, b),
1567                mdname(mddev), conf->geo.raid_disks - mddev->degraded);
1568 }
1569
1570 static void print_conf(struct r10conf *conf)
1571 {
1572         int i;
1573         struct raid10_info *tmp;
1574
1575         printk(KERN_DEBUG "RAID10 conf printout:\n");
1576         if (!conf) {
1577                 printk(KERN_DEBUG "(!conf)\n");
1578                 return;
1579         }
1580         printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1581                 conf->geo.raid_disks);
1582
1583         for (i = 0; i < conf->geo.raid_disks; i++) {
1584                 char b[BDEVNAME_SIZE];
1585                 tmp = conf->mirrors + i;
1586                 if (tmp->rdev)
1587                         printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
1588                                 i, !test_bit(In_sync, &tmp->rdev->flags),
1589                                 !test_bit(Faulty, &tmp->rdev->flags),
1590                                 bdevname(tmp->rdev->bdev,b));
1591         }
1592 }
1593
1594 static void close_sync(struct r10conf *conf)
1595 {
1596         wait_barrier(conf);
1597         allow_barrier(conf);
1598
1599         mempool_destroy(conf->r10buf_pool);
1600         conf->r10buf_pool = NULL;
1601 }
1602
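/* Recovery has finished: mark freshly recovered devices (or their
 * replacements) In_sync, reduce mddev->degraded accordingly, and
 * return the number of devices activated.
 */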
1603 static int raid10_spare_active(struct mddev *mddev)
1604 {
1605         int i;
1606         struct r10conf *conf = mddev->private;
1607         struct raid10_info *tmp;
1608         int count = 0;
1609         unsigned long flags;
1610
1611         /*
1612          * Find all non-in_sync disks within the RAID10 configuration
1613          * and mark them in_sync
1614          */
1615         for (i = 0; i < conf->geo.raid_disks; i++) {
1616                 tmp = conf->mirrors + i;
1617                 if (tmp->replacement
1618                     && tmp->replacement->recovery_offset == MaxSector
1619                     && !test_bit(Faulty, &tmp->replacement->flags)
1620                     && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1621                         /* Replacement has just become active */
1622                         if (!tmp->rdev
1623                             || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1624                                 count++;
1625                         if (tmp->rdev) {
1626                                 /* Replaced device not technically faulty,
1627                                  * but we need to be sure it gets removed
1628                                  * and never re-added.
1629                                  */
1630                                 set_bit(Faulty, &tmp->rdev->flags);
1631                                 sysfs_notify_dirent_safe(
1632                                         tmp->rdev->sysfs_state);
1633                         }
1634                         sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1635                 } else if (tmp->rdev
1636                            && !test_bit(Faulty, &tmp->rdev->flags)
1637                            && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
1638                         count++;
1639                         sysfs_notify_dirent(tmp->rdev->sysfs_state);
1640                 }
1641         }
1642         spin_lock_irqsave(&conf->device_lock, flags);
1643         mddev->degraded -= count;
1644         spin_unlock_irqrestore(&conf->device_lock, flags);
1645
1646         print_conf(conf);
1647         return count;
1648 }
1649
1650
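/* Hot-add 'rdev': prefer its previous slot if that is still free,
 * otherwise the first free slot, or attach it as a replacement for a
 * device that has requested one (WantReplacement).
 */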
1651 static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1652 {
1653         struct r10conf *conf = mddev->private;
1654         int err = -EEXIST;
1655         int mirror;
1656         int first = 0;
1657         int last = conf->geo.raid_disks - 1;
1658         struct request_queue *q = bdev_get_queue(rdev->bdev);
1659
1660         if (mddev->recovery_cp < MaxSector)
1661                 /* only hot-add to in-sync arrays, as recovery is
1662                  * very different from resync
1663                  */
1664                 return -EBUSY;
1665         if (rdev->saved_raid_disk < 0 && !_enough(conf, &conf->prev, -1))
1666                 return -EINVAL;
1667
1668         if (rdev->raid_disk >= 0)
1669                 first = last = rdev->raid_disk;
1670
1671         if (q->merge_bvec_fn) {
1672                 set_bit(Unmerged, &rdev->flags);
1673                 mddev->merge_check_needed = 1;
1674         }
1675
1676         if (rdev->saved_raid_disk >= first &&
1677             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1678                 mirror = rdev->saved_raid_disk;
1679         else
1680                 mirror = first;
1681         for ( ; mirror <= last ; mirror++) {
1682                 struct raid10_info *p = &conf->mirrors[mirror];
1683                 if (p->recovery_disabled == mddev->recovery_disabled)
1684                         continue;
1685                 if (p->rdev) {
1686                         if (!test_bit(WantReplacement, &p->rdev->flags) ||
1687                             p->replacement != NULL)
1688                                 continue;
1689                         clear_bit(In_sync, &rdev->flags);
1690                         set_bit(Replacement, &rdev->flags);
1691                         rdev->raid_disk = mirror;
1692                         err = 0;
1693                         disk_stack_limits(mddev->gendisk, rdev->bdev,
1694                                           rdev->data_offset << 9);
1695                         conf->fullsync = 1;
1696                         rcu_assign_pointer(p->replacement, rdev);
1697                         break;
1698                 }
1699
1700                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1701                                   rdev->data_offset << 9);
1702
1703                 p->head_position = 0;
1704                 p->recovery_disabled = mddev->recovery_disabled - 1;
1705                 rdev->raid_disk = mirror;
1706                 err = 0;
1707                 if (rdev->saved_raid_disk != mirror)
1708                         conf->fullsync = 1;
1709                 rcu_assign_pointer(p->rdev, rdev);
1710                 break;
1711         }
1712         if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1713                 /* Some requests might not have seen this new
1714                  * merge_bvec_fn.  We must wait for them to complete
1715                  * before merging the device fully.
1716                  * First we make sure any code which has tested
1717                  * our function has submitted the request, then
1718                  * we wait for all outstanding requests to complete.
1719                  */
1720                 synchronize_sched();
1721                 raise_barrier(conf, 0);
1722                 lower_barrier(conf);
1723                 clear_bit(Unmerged, &rdev->flags);
1724         }
1725         md_integrity_add_rdev(rdev, mddev);
1726         print_conf(conf);
1727         return err;
1728 }
1729
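/* Hot-remove 'rdev' (or a replacement).  Refuse with -EBUSY while it
 * is still in_sync or has IO pending; a non-faulty device is only
 * removed once recovery to it is no longer possible.
 */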
1730 static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1731 {
1732         struct r10conf *conf = mddev->private;
1733         int err = 0;
1734         int number = rdev->raid_disk;
1735         struct md_rdev **rdevp;
1736         struct raid10_info *p = conf->mirrors + number;
1737
1738         print_conf(conf);
1739         if (rdev == p->rdev)
1740                 rdevp = &p->rdev;
1741         else if (rdev == p->replacement)
1742                 rdevp = &p->replacement;
1743         else
1744                 return 0;
1745
1746         if (test_bit(In_sync, &rdev->flags) ||
1747             atomic_read(&rdev->nr_pending)) {
1748                 err = -EBUSY;
1749                 goto abort;
1750         }
1751         /* Only remove faulty devices if recovery
1752          * is not possible.
1753          */
1754         if (!test_bit(Faulty, &rdev->flags) &&
1755             mddev->recovery_disabled != p->recovery_disabled &&
1756             (!p->replacement || p->replacement == rdev) &&
1757             number < conf->geo.raid_disks &&
1758             enough(conf, -1)) {
1759                 err = -EBUSY;
1760                 goto abort;
1761         }
1762         *rdevp = NULL;
1763         synchronize_rcu();
1764         if (atomic_read(&rdev->nr_pending)) {
1765                 /* lost the race, try later */
1766                 err = -EBUSY;
1767                 *rdevp = rdev;
1768                 goto abort;
1769         } else if (p->replacement) {
1770                 /* We must have just cleared 'rdev' */
1771                 p->rdev = p->replacement;
1772                 clear_bit(Replacement, &p->replacement->flags);
1773                 smp_mb(); /* Make sure other CPUs may see both as identical
1774                            * but will never see neither of them set, if they are careful.
1775                            */
1776                 p->replacement = NULL;
1777                 clear_bit(WantReplacement, &rdev->flags);
1778         } else
1779                 /* We might have just removed the Replacement as faulty;
1780                  * clear the flag just in case.
1781                  */
1782                 clear_bit(WantReplacement, &rdev->flags);
1783
1784         err = md_integrity_register(mddev);
1785
1786 abort:
1787
1788         print_conf(conf);
1789         return err;
1790 }
1791
1792
1793 static void end_sync_read(struct bio *bio, int error)
1794 {
1795         struct r10bio *r10_bio = bio->bi_private;
1796         struct r10conf *conf = r10_bio->mddev->private;
1797         int d;
1798
1799         if (bio == r10_bio->master_bio) {
1800                 /* this is a reshape read */
1801                 d = r10_bio->read_slot; /* really the read dev */
1802         } else
1803                 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
1804
1805         if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1806                 set_bit(R10BIO_Uptodate, &r10_bio->state);
1807         else
1808                 /* The write handler will notice the lack of
1809                  * R10BIO_Uptodate and record any errors etc
1810                  */
1811                 atomic_add(r10_bio->sectors,
1812                            &conf->mirrors[d].rdev->corrected_errors);
1813
1814         /* for reconstruct, we always reschedule after a read.
1815          * for resync, only after all reads
1816          */
1817         rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
1818         if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1819             atomic_dec_and_test(&r10_bio->remaining)) {
1820                 /* we have read all the blocks,
1821                  * do the comparison in process context in raid10d
1822                  */
1823                 reschedule_retry(r10_bio);
1824         }
1825 }
1826
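/* Drop one reference on a sync/recovery r10_bio.  When the last
 * reference goes, either hand the bio back to raid10d (there are bad
 * blocks to fix) or free the buffer, and for the primary bio report
 * the sectors as synced via md_done_sync().
 */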
1827 static void end_sync_request(struct r10bio *r10_bio)
1828 {
1829         struct mddev *mddev = r10_bio->mddev;
1830
1831         while (atomic_dec_and_test(&r10_bio->remaining)) {
1832                 if (r10_bio->master_bio == NULL) {
1833                         /* the primary of several recovery bios */
1834                         sector_t s = r10_bio->sectors;
1835                         if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1836                             test_bit(R10BIO_WriteError, &r10_bio->state))
1837                                 reschedule_retry(r10_bio);
1838                         else
1839                                 put_buf(r10_bio);
1840                         md_done_sync(mddev, s, 1);
1841                         break;
1842                 } else {
1843                         struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
1844                         if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1845                             test_bit(R10BIO_WriteError, &r10_bio->state))
1846                                 reschedule_retry(r10_bio);
1847                         else
1848                                 put_buf(r10_bio);
1849                         r10_bio = r10_bio2;
1850                 }
1851         }
1852 }
1853
1854 static void end_sync_write(struct bio *bio, int error)
1855 {
1856         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1857         struct r10bio *r10_bio = bio->bi_private;
1858         struct mddev *mddev = r10_bio->mddev;
1859         struct r10conf *conf = mddev->private;
1860         int d;
1861         sector_t first_bad;
1862         int bad_sectors;
1863         int slot;
1864         int repl;
1865         struct md_rdev *rdev = NULL;
1866
1867         d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1868         if (repl)
1869                 rdev = conf->mirrors[d].replacement;
1870         else
1871                 rdev = conf->mirrors[d].rdev;
1872
1873         if (!uptodate) {
1874                 if (repl)
1875                         md_error(mddev, rdev);
1876                 else {
1877                         set_bit(WriteErrorSeen, &rdev->flags);
1878                         if (!test_and_set_bit(WantReplacement, &rdev->flags))
1879                                 set_bit(MD_RECOVERY_NEEDED,
1880                                         &rdev->mddev->recovery);
1881                         set_bit(R10BIO_WriteError, &r10_bio->state);
1882                 }
1883         } else if (is_badblock(rdev,
1884                              r10_bio->devs[slot].addr,
1885                              r10_bio->sectors,
1886                              &first_bad, &bad_sectors))
1887                 set_bit(R10BIO_MadeGood, &r10_bio->state);
1888
1889         rdev_dec_pending(rdev, mddev);
1890
1891         end_sync_request(r10_bio);
1892 }
1893
1894 /*
1895  * Note: sync and recovery are handled very differently for raid10.
1896  * This code is for resync.
1897  * For resync, we read through virtual addresses and read all blocks.
1898  * If there is any error, we schedule a write.  The lowest numbered
1899  * drive is authoritative.
1900  * However, requests come in for physical addresses, so we need to map.
1901  * For every physical address there are raid_disks/copies virtual addresses,
1902  * which is always at least one, but is not necessarily an integer.
1903  * This means that a physical address can span multiple chunks, so we may
1904  * have to submit multiple io requests for a single sync request.
1905  */
1906 /*
1907  * We check if all blocks are in-sync and only write to blocks that
1908  * aren't in sync
1909  */
1910 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1911 {
1912         struct r10conf *conf = mddev->private;
1913         int i, first;
1914         struct bio *tbio, *fbio;
1915         int vcnt;
1916
1917         atomic_set(&r10_bio->remaining, 1);
1918
1919         /* find the first device with a block */
1920         for (i=0; i<conf->copies; i++)
1921                 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
1922                         break;
1923
1924         if (i == conf->copies)
1925                 goto done;
1926
1927         first = i;
1928         fbio = r10_bio->devs[i].bio;
1929
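        /* Number of PAGE_SIZE bio_vec entries needed to cover the whole
         * resync range.
         */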
1930         vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1931         /* now find blocks with errors */
1932         for (i=0 ; i < conf->copies ; i++) {
1933                 int  j, d;
1934
1935                 tbio = r10_bio->devs[i].bio;
1936
1937                 if (tbio->bi_end_io != end_sync_read)
1938                         continue;
1939                 if (i == first)
1940                         continue;
1941                 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
1942                         /* We know that the bi_io_vec layout is the same for
1943                          * both 'first' and 'i', so we just compare them.
1944                          * All vec entries are PAGE_SIZE;
1945                          */
1946                         for (j = 0; j < vcnt; j++)
1947                                 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
1948                                            page_address(tbio->bi_io_vec[j].bv_page),
1949                                            fbio->bi_io_vec[j].bv_len))
1950                                         break;
1951                         if (j == vcnt)
1952                                 continue;
1953                         mddev->resync_mismatches += r10_bio->sectors;
1954                         if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1955                                 /* Don't fix anything. */
1956                                 continue;
1957                 }
1958                 /* Ok, we need to write this bio, either to correct an
1959                  * inconsistency or to correct an unreadable block.
1960                  * First we need to fixup bv_offset, bv_len and
1961                  * bi_vecs, as the read request might have corrupted these
1962                  */
1963                 tbio->bi_vcnt = vcnt;
1964                 tbio->bi_size = r10_bio->sectors << 9;
1965                 tbio->bi_idx = 0;
1966                 tbio->bi_phys_segments = 0;
1967                 tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
1968                 tbio->bi_flags |= 1 << BIO_UPTODATE;
1969                 tbio->bi_next = NULL;
1970                 tbio->bi_rw = WRITE;
1971                 tbio->bi_private = r10_bio;
1972                 tbio->bi_sector = r10_bio->devs[i].addr;
1973
1974                 for (j=0; j < vcnt ; j++) {
1975                         tbio->bi_io_vec[j].bv_offset = 0;
1976                         tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
1977
1978                         memcpy(page_address(tbio->bi_io_vec[j].bv_page),
1979                                page_address(fbio->bi_io_vec[j].bv_page),
1980                                PAGE_SIZE);
1981                 }
1982                 tbio->bi_end_io = end_sync_write;
1983
1984                 d = r10_bio->devs[i].devnum;
1985                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
1986                 atomic_inc(&r10_bio->remaining);
1987                 md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
1988
1989                 tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
1990                 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
1991                 generic_make_request(tbio);
1992         }
1993
1994         /* Now write out to any replacement devices
1995          * that are active
1996          */
1997         for (i = 0; i < conf->copies; i++) {
1998                 int j, d;
1999
2000                 tbio = r10_bio->devs[i].repl_bio;
2001                 if (!tbio || !tbio->bi_end_io)
2002                         continue;
2003                 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2004                     && r10_bio->devs[i].bio != fbio)
2005                         for (j = 0; j < vcnt; j++)
2006                                 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2007                                        page_address(fbio->bi_io_vec[j].bv_page),
2008                                        PAGE_SIZE);
2009                 d = r10_bio->devs[i].devnum;
2010                 atomic_inc(&r10_bio->remaining);
2011                 md_sync_acct(conf->mirrors[d].replacement->bdev,
2012                              tbio->bi_size >> 9);
2013                 generic_make_request(tbio);
2014         }
2015
2016 done:
2017         if (atomic_dec_and_test(&r10_bio->remaining)) {
2018                 md_done_sync(mddev, r10_bio->sectors, 1);
2019                 put_buf(r10_bio);
2020         }
2021 }
2022
2023 /*
2024  * Now for the recovery code.
2025  * Recovery happens across physical sectors.
2026  * We recover all non-in_sync drives by finding the virtual address of
2027  * each, and then choose a working drive that also has that virt address.
2028  * There is a separate r10_bio for each non-in_sync drive.
2029  * Only the first two slots are in use. The first for reading,
2030  * The second for writing.
2031  *
2032  */
2033 static void fix_recovery_read_error(struct r10bio *r10_bio)
2034 {
2035         /* We got a read error during recovery.
2036          * We repeat the read in smaller page-sized sections.
2037          * If a read succeeds, write it to the new device or record
2038          * a bad block if we cannot.
2039          * If a read fails, record a bad block on both old and
2040          * new devices.
2041          */
2042         struct mddev *mddev = r10_bio->mddev;
2043         struct r10conf *conf = mddev->private;
2044         struct bio *bio = r10_bio->devs[0].bio;
2045         sector_t sect = 0;
2046         int sectors = r10_bio->sectors;
2047         int idx = 0;
2048         int dr = r10_bio->devs[0].devnum;
2049         int dw = r10_bio->devs[1].devnum;
2050
2051         while (sectors) {
2052                 int s = sectors;
2053                 struct md_rdev *rdev;
2054                 sector_t addr;
2055                 int ok;
2056
2057                 if (s > (PAGE_SIZE>>9))
2058                         s = PAGE_SIZE >> 9;
2059
2060                 rdev = conf->mirrors[dr].rdev;
2061                 addr = r10_bio->devs[0].addr + sect;
2062                 ok = sync_page_io(rdev,
2063                                   addr,
2064                                   s << 9,
2065                                   bio->bi_io_vec[idx].bv_page,
2066                                   READ, false);
2067                 if (ok) {
2068                         rdev = conf->mirrors[dw].rdev;
2069                         addr = r10_bio->devs[1].addr + sect;
2070                         ok = sync_page_io(rdev,
2071                                           addr,
2072                                           s << 9,
2073                                           bio->bi_io_vec[idx].bv_page,
2074                                           WRITE, false);
2075                         if (!ok) {
2076                                 set_bit(WriteErrorSeen, &rdev->flags);
2077                                 if (!test_and_set_bit(WantReplacement,
2078                                                       &rdev->flags))
2079                                         set_bit(MD_RECOVERY_NEEDED,
2080                                                 &rdev->mddev->recovery);
2081                         }
2082                 }
2083                 if (!ok) {
2084                         /* We don't worry if we cannot set a bad block -
2085                          * it really is bad so there is no loss in not
2086                          * recording it yet
2087                          */
2088                         rdev_set_badblocks(rdev, addr, s, 0);
2089
2090                         if (rdev != conf->mirrors[dw].rdev) {
2091                                 /* need bad block on destination too */
2092                                 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
2093                                 addr = r10_bio->devs[1].addr + sect;
2094                                 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2095                                 if (!ok) {
2096                                         /* just abort the recovery */
2097                                         printk(KERN_NOTICE
2098                                                "md/raid10:%s: recovery aborted"
2099                                                " due to read error\n",
2100                                                mdname(mddev));
2101
2102                                         conf->mirrors[dw].recovery_disabled
2103                                                 = mddev->recovery_disabled;
2104                                         set_bit(MD_RECOVERY_INTR,
2105                                                 &mddev->recovery);
2106                                         break;
2107                                 }
2108                         }
2109                 }
2110
2111                 sectors -= s;
2112                 sect += s;
2113                 idx++;
2114         }
2115 }
2116
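/* Recovery write-out: if the earlier read succeeded, submit the write
 * in slot 1 (and to its replacement), sharing the pages that were read
 * into slot 0; otherwise retry the read page by page via
 * fix_recovery_read_error().
 */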
2117 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2118 {
2119         struct r10conf *conf = mddev->private;
2120         int d;
2121         struct bio *wbio, *wbio2;
2122
2123         if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2124                 fix_recovery_read_error(r10_bio);
2125                 end_sync_request(r10_bio);
2126                 return;
2127         }
2128
2129         /*
2130          * share the pages with the first bio
2131          * and submit the write request
2132          */
2133         d = r10_bio->devs[1].devnum;
2134         wbio = r10_bio->devs[1].bio;
2135         wbio2 = r10_bio->devs[1].repl_bio;
2136         if (wbio->bi_end_io) {
2137                 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2138                 md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
2139                 generic_make_request(wbio);
2140         }
2141         if (wbio2 && wbio2->bi_end_io) {
2142                 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2143                 md_sync_acct(conf->mirrors[d].replacement->bdev,
2144                              wbio2->bi_size >> 9);
2145                 generic_make_request(wbio2);
2146         }
2147 }
2148
2149
2150 /*
2151  * Used by fix_read_error() to decay the per rdev read_errors.
2152  * We halve the read error count for every hour that has elapsed
2153  * since the last recorded read error.
2154  *
2155  */
2156 static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
2157 {
2158         struct timespec cur_time_mon;
2159         unsigned long hours_since_last;
2160         unsigned int read_errors = atomic_read(&rdev->read_errors);
2161
2162         ktime_get_ts(&cur_time_mon);
2163
2164         if (rdev->last_read_error.tv_sec == 0 &&
2165             rdev->last_read_error.tv_nsec == 0) {
2166                 /* first time we've seen a read error */
2167                 rdev->last_read_error = cur_time_mon;
2168                 return;
2169         }
2170
2171         hours_since_last = (cur_time_mon.tv_sec -
2172                             rdev->last_read_error.tv_sec) / 3600;
2173
2174         rdev->last_read_error = cur_time_mon;
2175
2176         /*
2177          * if hours_since_last is > the number of bits in read_errors
2178          * just set read errors to 0. We do this to avoid
2179          * overflowing the shift of read_errors by hours_since_last.
2180          */
2181         if (hours_since_last >= 8 * sizeof(read_errors))
2182                 atomic_set(&rdev->read_errors, 0);
2183         else
2184                 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2185 }
2186
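/* Synchronous single-range IO used while correcting read errors.
 * Returns 1 on success, 0 on an IO error (recorded as a bad block or
 * escalated to a device failure), and -1 if the range overlaps a known
 * bad block (always for reads; for writes only while WriteErrorSeen is
 * set).
 */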
2187 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
2188                             int sectors, struct page *page, int rw)
2189 {
2190         sector_t first_bad;
2191         int bad_sectors;
2192
2193         if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2194             && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2195                 return -1;
2196         if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2197                 /* success */
2198                 return 1;
2199         if (rw == WRITE) {
2200                 set_bit(WriteErrorSeen, &rdev->flags);
2201                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2202                         set_bit(MD_RECOVERY_NEEDED,
2203                                 &rdev->mddev->recovery);
2204         }
2205         /* need to record an error - either for the block or the device */
2206         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2207                 md_error(rdev->mddev, rdev);
2208         return 0;
2209 }
2210
2211 /*
2212  * This is a kernel thread which:
2213  *
2214  *      1.      Retries failed read operations on working mirrors.
2215  *      2.      Updates the raid superblock when problems are encountered.
2216  *      3.      Performs writes following reads for array synchronising.
2217  */
2218
2219 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2220 {
2221         int sect = 0; /* Offset from r10_bio->sector */
2222         int sectors = r10_bio->sectors;
2223         struct md_rdev*rdev;
2224         int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
2225         int d = r10_bio->devs[r10_bio->read_slot].devnum;
2226
2227         /* still own a reference to this rdev, so it cannot
2228          * have been cleared recently.
2229          */
2230         rdev = conf->mirrors[d].rdev;
2231
2232         if (test_bit(Faulty, &rdev->flags))
2233                 /* drive has already been failed, just ignore any
2234                    more fix_read_error() attempts */
2235                 return;
2236
2237         check_decay_read_errors(mddev, rdev);
2238         atomic_inc(&rdev->read_errors);
2239         if (atomic_read(&rdev->read_errors) > max_read_errors) {
2240                 char b[BDEVNAME_SIZE];
2241                 bdevname(rdev->bdev, b);
2242
2243                 printk(KERN_NOTICE
2244                        "md/raid10:%s: %s: Raid device exceeded "
2245                        "read_error threshold [cur %d:max %d]\n",
2246                        mdname(mddev), b,
2247                        atomic_read(&rdev->read_errors), max_read_errors);
2248                 printk(KERN_NOTICE
2249                        "md/raid10:%s: %s: Failing raid device\n",
2250                        mdname(mddev), b);
2251                 md_error(mddev, conf->mirrors[d].rdev);
2252                 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
2253                 return;
2254         }
2255
2256         while(sectors) {
2257                 int s = sectors;
2258                 int sl = r10_bio->read_slot;
2259                 int success = 0;
2260                 int start;
2261
2262                 if (s > (PAGE_SIZE>>9))
2263                         s = PAGE_SIZE >> 9;
2264
2265                 rcu_read_lock();
2266                 do {
2267                         sector_t first_bad;
2268                         int bad_sectors;
2269
2270                         d = r10_bio->devs[sl].devnum;
2271                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2272                         if (rdev &&
2273                             !test_bit(Unmerged, &rdev->flags) &&
2274                             test_bit(In_sync, &rdev->flags) &&
2275                             is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
2276                                         &first_bad, &bad_sectors) == 0) {
2277                                 atomic_inc(&rdev->nr_pending);
2278                                 rcu_read_unlock();
2279                                 success = sync_page_io(rdev,
2280                                                        r10_bio->devs[sl].addr +
2281                                                        sect,
2282                                                        s<<9,
2283                                                        conf->tmppage, READ, false);
2284                                 rdev_dec_pending(rdev, mddev);
2285                                 rcu_read_lock();
2286                                 if (success)
2287                                         break;
2288                         }
2289                         sl++;
2290                         if (sl == conf->copies)
2291                                 sl = 0;
2292                 } while (!success && sl != r10_bio->read_slot);
2293                 rcu_read_unlock();
2294
2295                 if (!success) {
2296                         /* Cannot read from anywhere, just mark the block
2297                          * as bad on the first device to discourage future
2298                          * reads.
2299                          */
2300                         int dn = r10_bio->devs[r10_bio->read_slot].devnum;
2301                         rdev = conf->mirrors[dn].rdev;
2302
2303                         if (!rdev_set_badblocks(
2304                                     rdev,
2305                                     r10_bio->devs[r10_bio->read_slot].addr
2306                                     + sect,
2307                                     s, 0)) {
2308                                 md_error(mddev, rdev);
2309                                 r10_bio->devs[r10_bio->read_slot].bio
2310                                         = IO_BLOCKED;
2311                         }
2312                         break;
2313                 }
2314
2315                 start = sl;
2316                 /* write it back and re-read */
2317                 rcu_read_lock();
2318                 while (sl != r10_bio->read_slot) {
2319                         char b[BDEVNAME_SIZE];
2320
2321                         if (sl==0)
2322                                 sl = conf->copies;
2323                         sl--;
2324                         d = r10_bio->devs[sl].devnum;
2325                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2326                         if (!rdev ||
2327                             test_bit(Unmerged, &rdev->flags) ||
2328                             !test_bit(In_sync, &rdev->flags))
2329                                 continue;
2330
2331                         atomic_inc(&rdev->nr_pending);
2332                         rcu_read_unlock();
2333                         if (r10_sync_page_io(rdev,
2334                                              r10_bio->devs[sl].addr +
2335                                              sect,
2336                                              s, conf->tmppage, WRITE)
2337                             == 0) {
2338                                 /* Well, this device is dead */
2339                                 printk(KERN_NOTICE
2340                                        "md/raid10:%s: read correction "
2341                                        "write failed"
2342                                        " (%d sectors at %llu on %s)\n",
2343                                        mdname(mddev), s,
2344                                        (unsigned long long)(
2345                                                sect +
2346                                                choose_data_offset(r10_bio,
2347                                                                   rdev)),
2348                                        bdevname(rdev->bdev, b));
2349                                 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2350                                        "drive\n",
2351                                        mdname(mddev),
2352                                        bdevname(rdev->bdev, b));
2353                         }
2354                         rdev_dec_pending(rdev, mddev);
2355                         rcu_read_lock();
2356                 }
2357                 sl = start;
2358                 while (sl != r10_bio->read_slot) {
2359                         char b[BDEVNAME_SIZE];
2360
2361                         if (sl==0)
2362                                 sl = conf->copies;
2363                         sl--;
2364                         d = r10_bio->devs[sl].devnum;
2365                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2366                         if (!rdev ||
2367                             !test_bit(In_sync, &rdev->flags))
2368                                 continue;
2369
2370                         atomic_inc(&rdev->nr_pending);
2371                         rcu_read_unlock();
2372                         switch (r10_sync_page_io(rdev,
2373                                              r10_bio->devs[sl].addr +
2374                                              sect,
2375                                              s, conf->tmppage,
2376                                                  READ)) {
2377                         case 0:
2378                                 /* Well, this device is dead */
2379                                 printk(KERN_NOTICE
2380                                        "md/raid10:%s: unable to read back "
2381                                        "corrected sectors"
2382                                        " (%d sectors at %llu on %s)\n",
2383                                        mdname(mddev), s,
2384                                        (unsigned long long)(
2385                                                sect +
2386                                                choose_data_offset(r10_bio, rdev)),
2387                                        bdevname(rdev->bdev, b));
2388                                 printk(KERN_NOTICE "md/raid10:%s: %s: failing "
2389                                        "drive\n",
2390                                        mdname(mddev),
2391                                        bdevname(rdev->bdev, b));
2392                                 break;
2393                         case 1:
2394                                 printk(KERN_INFO
2395                                        "md/raid10:%s: read error corrected"
2396                                        " (%d sectors at %llu on %s)\n",
2397                                        mdname(mddev), s,
2398                                        (unsigned long long)(
2399                                                sect +
2400                                                choose_data_offset(r10_bio, rdev)),
2401                                        bdevname(rdev->bdev, b));
2402                                 atomic_add(s, &rdev->corrected_errors);
2403                         }
2404
2405                         rdev_dec_pending(rdev, mddev);
2406                         rcu_read_lock();
2407                 }
2408                 rcu_read_unlock();
2409
2410                 sectors -= s;
2411                 sect += s;
2412         }
2413 }
2414
2415 static void bi_complete(struct bio *bio, int error)
2416 {
2417         complete((struct completion *)bio->bi_private);
2418 }
2419
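/* Local helper: submit 'bio' and sleep until it completes.  Returns
 * non-zero if the IO finished successfully (BIO_UPTODATE still set).
 */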
2420 static int submit_bio_wait(int rw, struct bio *bio)
2421 {
2422         struct completion event;
2423         rw |= REQ_SYNC;
2424
2425         init_completion(&event);
2426         bio->bi_private = &event;
2427         bio->bi_end_io = bi_complete;
2428         submit_bio(rw, bio);
2429         wait_for_completion(&event);
2430
2431         return test_bit(BIO_UPTODATE, &bio->bi_flags);
2432 }
2433
2434 static int narrow_write_error(struct r10bio *r10_bio, int i)
2435 {
2436         struct bio *bio = r10_bio->master_bio;
2437         struct mddev *mddev = r10_bio->mddev;
2438         struct r10conf *conf = mddev->private;
2439         struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2440         /* bio has the data to be written to slot 'i' where
2441          * we just recently had a write error.
2442          * We repeatedly clone the bio and trim down to one block,
2443          * then try the write.  Where the write fails we record
2444          * a bad block.
2445          * It is conceivable that the bio doesn't exactly align with
2446          * blocks.  We must handle this.
2447          *
2448          * We currently own a reference to the rdev.
2449          */
2450
2451         int block_sectors;
2452         sector_t sector;
2453         int sectors;
2454         int sect_to_write = r10_bio->sectors;
2455         int ok = 1;
2456
2457         if (rdev->badblocks.shift < 0)
2458                 return 0;
2459
2460         block_sectors = 1 << rdev->badblocks.shift;
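        /* Size the first write so that it ends on a badblocks-unit
         * boundary; each subsequent wbio then covers exactly one unit.
         */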
2461         sector = r10_bio->sector;
2462         sectors = ((r10_bio->sector + block_sectors)
2463                    & ~(sector_t)(block_sectors - 1))
2464                 - sector;
2465
2466         while (sect_to_write) {
2467                 struct bio *wbio;
2468                 if (sectors > sect_to_write)
2469                         sectors = sect_to_write;
2470                 /* Write at 'sector' for 'sectors' */
2471                 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2472                 md_trim_bio(wbio, sector - bio->bi_sector, sectors);
2473                 wbio->bi_sector = (r10_bio->devs[i].addr+
2474                                    choose_data_offset(r10_bio, rdev) +
2475                                    (sector - r10_bio->sector));
2476                 wbio->bi_bdev = rdev->bdev;
2477                 if (submit_bio_wait(WRITE, wbio) == 0)
2478                         /* Failure! */
2479                         ok = rdev_set_badblocks(rdev, sector,
2480                                                 sectors, 0)
2481                                 && ok;
2482
2483                 bio_put(wbio);
2484                 sect_to_write -= sectors;
2485                 sector += sectors;
2486                 sectors = block_sectors;
2487         }
2488         return ok;
2489 }
2490
2491 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2492 {
2493         int slot = r10_bio->read_slot;
2494         struct bio *bio;
2495         struct r10conf *conf = mddev->private;
2496         struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2497         char b[BDEVNAME_SIZE];
2498         unsigned long do_sync;
2499         int max_sectors;
2500
2501         /* we got a read error. Maybe the drive is bad.  Maybe just
2502          * the block and we can fix it.
2503          * We freeze all other IO, and try reading the block from
2504          * other devices.  When we find one, we re-write
2505          * and check whether that fixes the read error.
2506          * This is all done synchronously while the array is
2507          * frozen.
2508          */
2509         bio = r10_bio->devs[slot].bio;
2510         bdevname(bio->bi_bdev, b);
2511         bio_put(bio);
2512         r10_bio->devs[slot].bio = NULL;
2513
2514         if (mddev->ro == 0) {
2515                 freeze_array(conf);
2516                 fix_read_error(conf, mddev, r10_bio);
2517                 unfreeze_array(conf);
2518         } else
2519                 r10_bio->devs[slot].bio = IO_BLOCKED;
2520
2521         rdev_dec_pending(rdev, mddev);
2522
2523 read_more:
2524         rdev = read_balance(conf, r10_bio, &max_sectors);
2525         if (rdev == NULL) {
2526                 printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
2527                        " read error for block %llu\n",
2528                        mdname(mddev), b,
2529                        (unsigned long long)r10_bio->sector);
2530                 raid_end_bio_io(r10_bio);
2531                 return;
2532         }
2533
2534         do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
2535         slot = r10_bio->read_slot;
2536         printk_ratelimited(
2537                 KERN_ERR
2538                 "md/raid10:%s: %s: redirecting "
2539                 "sector %llu to another mirror\n",
2540                 mdname(mddev),
2541                 bdevname(rdev->bdev, b),
2542                 (unsigned long long)r10_bio->sector);
2543         bio = bio_clone_mddev(r10_bio->master_bio,
2544                               GFP_NOIO, mddev);
2545         md_trim_bio(bio,
2546                     r10_bio->sector - bio->bi_sector,
2547                     max_sectors);
2548         r10_bio->devs[slot].bio = bio;
2549         r10_bio->devs[slot].rdev = rdev;
2550         bio->bi_sector = r10_bio->devs[slot].addr
2551                 + choose_data_offset(r10_bio, rdev);
2552         bio->bi_bdev = rdev->bdev;
2553         bio->bi_rw = READ | do_sync;
2554         bio->bi_private = r10_bio;
2555         bio->bi_end_io = raid10_end_read_request;
2556         if (max_sectors < r10_bio->sectors) {
2557                 /* Drat - have to split this up more */
2558                 struct bio *mbio = r10_bio->master_bio;
2559                 int sectors_handled =
2560                         r10_bio->sector + max_sectors
2561                         - mbio->bi_sector;
2562                 r10_bio->sectors = max_sectors;
2563                 spin_lock_irq(&conf->device_lock);
2564                 if (mbio->bi_phys_segments == 0)
2565                         mbio->bi_phys_segments = 2;
2566                 else
2567                         mbio->bi_phys_segments++;
2568                 spin_unlock_irq(&conf->device_lock);
2569                 generic_make_request(bio);
2570
2571                 r10_bio = mempool_alloc(conf->r10bio_pool,
2572                                         GFP_NOIO);
2573                 r10_bio->master_bio = mbio;
2574                 r10_bio->sectors = (mbio->bi_size >> 9)
2575                         - sectors_handled;
2576                 r10_bio->state = 0;
2577                 set_bit(R10BIO_ReadError,
2578                         &r10_bio->state);
2579                 r10_bio->mddev = mddev;
2580                 r10_bio->sector = mbio->bi_sector
2581                         + sectors_handled;
2582
2583                 goto read_more;
2584         } else
2585                 generic_make_request(bio);
2586 }
2587
2588 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2589 {
2590         /* Some sort of write request has finished and it
2591          * succeeded in writing where we thought there was a
2592          * bad block.  So forget the bad block.
2593          * Or possibly it failed and we need to record
2594          * a bad block.
2595          */
2596         int m;
2597         struct md_rdev *rdev;
2598
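        /*
         * Two cases below: sync/recovery r10bios own their buffers, so
         * after updating the bad-block lists they are released with
         * put_buf(); normal write r10bios instead finish the original
         * request with raid_end_bio_io() once IO_MADE_GOOD and
         * narrow_write_error() handling is done.
         */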
2599         if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2600             test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2601                 for (m = 0; m < conf->copies; m++) {
2602                         int dev = r10_bio->devs[m].devnum;
2603                         rdev = conf->mirrors[dev].rdev;
2604                         if (r10_bio->devs[m].bio == NULL)
2605                                 continue;
2606                         if (test_bit(BIO_UPTODATE,
2607                                      &r10_bio->devs[m].bio->bi_flags)) {
2608                                 rdev_clear_badblocks(
2609                                         rdev,
2610                                         r10_bio->devs[m].addr,
2611                                         r10_bio->sectors, 0);
2612                         } else {
2613                                 if (!rdev_set_badblocks(
2614                                             rdev,
2615                                             r10_bio->devs[m].addr,
2616                                             r10_bio->sectors, 0))
2617                                         md_error(conf->mddev, rdev);
2618                         }
2619                         rdev = conf->mirrors[dev].replacement;
2620                         if (r10_bio->devs[m].repl_bio == NULL)
2621                                 continue;
2622                         if (test_bit(BIO_UPTODATE,
2623                                      &r10_bio->devs[m].repl_bio->bi_flags)) {
2624                                 rdev_clear_badblocks(
2625                                         rdev,
2626                                         r10_bio->devs[m].addr,
2627                                         r10_bio->sectors, 0);
2628                         } else {
2629                                 if (!rdev_set_badblocks(
2630                                             rdev,
2631                                             r10_bio->devs[m].addr,
2632                                             r10_bio->sectors, 0))
2633                                         md_error(conf->mddev, rdev);
2634                         }
2635                 }
2636                 put_buf(r10_bio);
2637         } else {
2638                 for (m = 0; m < conf->copies; m++) {
2639                         int dev = r10_bio->devs[m].devnum;
2640                         struct bio *bio = r10_bio->devs[m].bio;
2641                         rdev = conf->mirrors[dev].rdev;
2642                         if (bio == IO_MADE_GOOD) {
2643                                 rdev_clear_badblocks(
2644                                         rdev,
2645                                         r10_bio->devs[m].addr,
2646                                         r10_bio->sectors, 0);
2647                                 rdev_dec_pending(rdev, conf->mddev);
2648                         } else if (bio != NULL &&
2649                                    !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2650                                 if (!narrow_write_error(r10_bio, m)) {
2651                                         md_error(conf->mddev, rdev);
2652                                         set_bit(R10BIO_Degraded,
2653                                                 &r10_bio->state);
2654                                 }
2655                                 rdev_dec_pending(rdev, conf->mddev);
2656                         }
2657                         bio = r10_bio->devs[m].repl_bio;
2658                         rdev = conf->mirrors[dev].replacement;
2659                         if (rdev && bio == IO_MADE_GOOD) {
2660                                 rdev_clear_badblocks(
2661                                         rdev,
2662                                         r10_bio->devs[m].addr,
2663                                         r10_bio->sectors, 0);
2664                                 rdev_dec_pending(rdev, conf->mddev);
2665                         }
2666                 }
2667                 if (test_bit(R10BIO_WriteError,
2668                              &r10_bio->state))
2669                         close_write(r10_bio);
2670                 raid_end_bio_io(r10_bio);
2671         }
2672 }
2673
2674 static void raid10d(struct mddev *mddev)
2675 {
2676         struct r10bio *r10_bio;
2677         unsigned long flags;
2678         struct r10conf *conf = mddev->private;
2679         struct list_head *head = &conf->retry_list;
2680         struct blk_plug plug;
2681
2682         md_check_recovery(mddev);
2683
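        /*
         * Main service loop: flush writes queued on conf->pending_bio_list,
         * then pop r10bios off conf->retry_list one at a time and dispatch
         * them according to their state bits (write completion, reshape,
         * resync, recovery or read-error handling).
         */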
2684         blk_start_plug(&plug);
2685         for (;;) {
2686
2687                 flush_pending_writes(conf);
2688
2689                 spin_lock_irqsave(&conf->device_lock, flags);
2690                 if (list_empty(head)) {
2691                         spin_unlock_irqrestore(&conf->device_lock, flags);
2692                         break;
2693                 }
2694                 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
2695                 list_del(head->prev);
2696                 conf->nr_queued--;
2697                 spin_unlock_irqrestore(&conf->device_lock, flags);
2698
2699                 mddev = r10_bio->mddev;
2700                 conf = mddev->private;
2701                 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2702                     test_bit(R10BIO_WriteError, &r10_bio->state))
2703                         handle_write_completed(conf, r10_bio);
2704                 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
2705                         reshape_request_write(mddev, r10_bio);
2706                 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
2707                         sync_request_write(mddev, r10_bio);
2708                 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
2709                         recovery_request_write(mddev, r10_bio);
2710                 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
2711                         handle_read_error(mddev, r10_bio);
2712                 else {
2713                         /* just a partial read to be scheduled from a
2714                          * separate context
2715                          */
2716                         int slot = r10_bio->read_slot;
2717                         generic_make_request(r10_bio->devs[slot].bio);
2718                 }
2719
2720                 cond_resched();
2721                 if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
2722                         md_check_recovery(mddev);
2723         }
2724         blk_finish_plug(&plug);
2725 }
2726
2727
2728 static int init_resync(struct r10conf *conf)
2729 {
2730         int buffs;
2731         int i;
2732
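        /*
         * Pre-allocate enough buffers to cover one resync window; with
         * the usual definitions (e.g. a window of a few MB built from
         * 64KB blocks) this is a small fixed number of r10buf entries.
         */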
2733         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2734         BUG_ON(conf->r10buf_pool);
2735         conf->have_replacement = 0;
2736         for (i = 0; i < conf->geo.raid_disks; i++)
2737                 if (conf->mirrors[i].replacement)
2738                         conf->have_replacement = 1;
2739         conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
2740         if (!conf->r10buf_pool)
2741                 return -ENOMEM;
2742         conf->next_resync = 0;
2743         return 0;
2744 }
2745
2746 /*
2747  * perform a "sync" on one "block"
2748  *
2749  * We need to make sure that no normal I/O request - particularly write
2750  * requests - conflict with active sync requests.
2751  *
2752  * This is achieved by tracking pending requests and a 'barrier' concept
2753  * that can be installed to exclude normal IO requests.
2754  *
2755  * Resync and recovery are handled very differently.
2756  * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
2757  *
2758  * For resync, we iterate over virtual addresses, read all copies,
2759  * and update if there are differences.  If only one copy is live,
2760  * skip it.
2761  * For recovery, we iterate over physical addresses, read a good
2762  * value for each non-in_sync drive, and over-write.
2763  *
2764  * So, for recovery we may have several outstanding complex requests for a
2765  * given address, one for each out-of-sync device.  We model this by allocating
2766  * a number of r10_bio structures, one for each out-of-sync device.
2767  * As we set up these structures, we collect all bios together into a list
2768  * which we then process collectively to add pages, and then process again
2769  * to pass to generic_make_request.
2770  *
2771  * The r10_bio structures are linked using a borrowed master_bio pointer.
2772  * This link is counted in ->remaining.  When the r10_bio that points to NULL
2773  * has its remaining count decremented to 0, the whole complex operation
2774  * is complete.
2775  *
2776  */
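/*
 * An illustrative sketch of that chaining: if one pass builds three
 * r10_bios A, B and C (A first), the borrowed master_bio pointers form
 *
 *      C -> B -> A -> NULL
 *
 * and each link adds one to the ->remaining count of the r10_bio it
 * points at.  As each count drops to zero that r10_bio is released and
 * the count of its target is dropped in turn, so the pass only finishes
 * when A - the one pointing at NULL - reaches zero.
 */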
2777
2778 static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2779                              int *skipped, int go_faster)
2780 {
2781         struct r10conf *conf = mddev->private;
2782         struct r10bio *r10_bio;
2783         struct bio *biolist = NULL, *bio;
2784         sector_t max_sector, nr_sectors;
2785         int i;
2786         int max_sync;
2787         sector_t sync_blocks;
2788         sector_t sectors_skipped = 0;
2789         int chunks_skipped = 0;
2790         sector_t chunk_mask = conf->geo.chunk_mask;
2791
2792         if (!conf->r10buf_pool)
2793                 if (init_resync(conf))
2794                         return 0;
2795
2796  skipped:
2797         max_sector = mddev->dev_sectors;
2798         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
2799             test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2800                 max_sector = mddev->resync_max_sectors;
2801         if (sector_nr >= max_sector) {
2802                 /* If we aborted, we need to abort the
2803                  * sync on the 'current' bitmap chunks (there can
2804                  * be several when recovering multiple devices),
2805                  * as we may have started syncing them but not finished.
2806                  * We can find the current address in
2807                  * mddev->curr_resync, but for recovery,
2808                  * we need to convert that to several
2809                  * virtual addresses.
2810                  */
2811                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2812                         end_reshape(conf);
2813                         return 0;
2814                 }
2815
2816                 if (mddev->curr_resync < max_sector) { /* aborted */
2817                         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
2818                                 bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2819                                                 &sync_blocks, 1);
2820                         else for (i = 0; i < conf->geo.raid_disks; i++) {
2821                                 sector_t sect =
2822                                         raid10_find_virt(conf, mddev->curr_resync, i);
2823                                 bitmap_end_sync(mddev->bitmap, sect,
2824                                                 &sync_blocks, 1);
2825                         }
2826                 } else {
2827                         /* completed sync */
2828                         if ((!mddev->bitmap || conf->fullsync)
2829                             && conf->have_replacement
2830                             && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2831                                 /* Completed a full sync so the replacements
2832                                  * are now fully recovered.
2833                                  */
2834                                 for (i = 0; i < conf->geo.raid_disks; i++)
2835                                         if (conf->mirrors[i].replacement)
2836                                                 conf->mirrors[i].replacement
2837                                                         ->recovery_offset
2838                                                         = MaxSector;
2839                         }
2840                         conf->fullsync = 0;
2841                 }
2842                 bitmap_close_sync(mddev->bitmap);
2843                 close_sync(conf);
2844                 *skipped = 1;
2845                 return sectors_skipped;
2846         }
2847
2848         if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
2849                 return reshape_request(mddev, sector_nr, skipped);
2850
2851         if (chunks_skipped >= conf->geo.raid_disks) {
2852                 /* if there has been nothing to do on any drive,
2853                  * then there is nothing to do at all..
2854                  */
2855                 *skipped = 1;
2856                 return (max_sector - sector_nr) + sectors_skipped;
2857         }
2858
2859         if (max_sector > mddev->resync_max)
2860                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2861
2862         /* make sure whole request will fit in a chunk - if chunks
2863          * are meaningful
2864          */
2865         if (conf->geo.near_copies < conf->geo.raid_disks &&
2866             max_sector > (sector_nr | chunk_mask))
2867                 max_sector = (sector_nr | chunk_mask) + 1;
2868         /*
2869          * If there is non-resync activity waiting for us then
2870          * put in a delay to throttle resync.
2871          */
2872         if (!go_faster && conf->nr_waiting)
2873                 msleep_interruptible(1000);
2874
2875         /* Again, very different code for resync and recovery.
2876          * Both must result in an r10bio with a list of bios that
2877          * have bi_end_io, bi_sector, bi_bdev set,
2878          * and bi_private set to the r10bio.
2879          * For recovery, we may actually create several r10bios
2880          * with 2 bios in each, that correspond to the bios in the main one.
2881          * In this case, the subordinate r10bios link back through a
2882          * borrowed master_bio pointer, and the counter in the master
2883          * includes a ref from each subordinate.
2884          */
2885         /* First, we decide what to do and set ->bi_end_io
2886          * To end_sync_read if we want to read, and
2887          * end_sync_write if we will want to write.
2888          */
2889
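        /* Each pass covers at most one resync buffer's worth of data:
         * RESYNC_PAGES pages expressed in 512-byte sectors (for example
         * 128 sectors for a 64KB buffer with 4KB pages).
         */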
2890         max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
2891         if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
2892                 /* recovery... the complicated one */
2893                 int j;
2894                 r10_bio = NULL;
2895
2896                 for (i = 0 ; i < conf->geo.raid_disks; i++) {
2897                         int still_degraded;
2898                         struct r10bio *rb2;
2899                         sector_t sect;
2900                         int must_sync;
2901                         int any_working;
2902                         struct raid10_info *mirror = &conf->mirrors[i];
2903
2904                         if ((mirror->rdev == NULL ||
2905                              test_bit(In_sync, &mirror->rdev->flags))
2906                             &&
2907                             (mirror->replacement == NULL ||
2908                              test_bit(Faulty,
2909                                       &mirror->replacement->flags)))
2910                                 continue;
2911
2912                         still_degraded = 0;
2913                         /* want to reconstruct this device */
2914                         rb2 = r10_bio;
2915                         sect = raid10_find_virt(conf, sector_nr, i);
2916                         if (sect >= mddev->resync_max_sectors) {
2917                                 /* last stripe is not complete - don't
2918                                  * try to recover this sector.
2919                                  */
2920                                 continue;
2921                         }
2922                         /* Unless we are doing a full sync, or a replacement
2923                          * we only need to recover the block if it is set in
2924                          * the bitmap
2925                          */
2926                         must_sync = bitmap_start_sync(mddev->bitmap, sect,
2927                                                       &sync_blocks, 1);
2928                         if (sync_blocks < max_sync)
2929                                 max_sync = sync_blocks;
2930                         if (!must_sync &&
2931                             mirror->replacement == NULL &&
2932                             !conf->fullsync) {
2933                                 /* yep, skip the sync_blocks here, but don't assume
2934                                  * that there will never be anything to do here
2935                                  */
2936                                 chunks_skipped = -1;
2937                                 continue;
2938                         }
2939
2940                         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
2941                         raise_barrier(conf, rb2 != NULL);
2942                         atomic_set(&r10_bio->remaining, 0);
2943
2944                         r10_bio->master_bio = (struct bio*)rb2;
2945                         if (rb2)
2946                                 atomic_inc(&rb2->remaining);
2947                         r10_bio->mddev = mddev;
2948                         set_bit(R10BIO_IsRecover, &r10_bio->state);
2949                         r10_bio->sector = sect;
2950
2951                         raid10_find_phys(conf, r10_bio);
2952
2953                         /* Need to check if the array will still be
2954                          * degraded
2955                          */
2956                         for (j = 0; j < conf->geo.raid_disks; j++)
2957                                 if (conf->mirrors[j].rdev == NULL ||
2958                                     test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
2959                                         still_degraded = 1;
2960                                         break;
2961                                 }
2962
2963                         must_sync = bitmap_start_sync(mddev->bitmap, sect,
2964                                                       &sync_blocks, still_degraded);
2965
2966                         any_working = 0;
2967                         for (j=0; j<conf->copies;j++) {
2968                                 int k;
2969                                 int d = r10_bio->devs[j].devnum;
2970                                 sector_t from_addr, to_addr;
2971                                 struct md_rdev *rdev;
2972                                 sector_t sector, first_bad;
2973                                 int bad_sectors;
2974                                 if (!conf->mirrors[d].rdev ||
2975                                     !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
2976                                         continue;
2977                                 /* This is where we read from */
2978                                 any_working = 1;
2979                                 rdev = conf->mirrors[d].rdev;
2980                                 sector = r10_bio->devs[j].addr;
2981
2982                                 if (is_badblock(rdev, sector, max_sync,
2983                                                 &first_bad, &bad_sectors)) {
2984                                         if (first_bad > sector)
2985                                                 max_sync = first_bad - sector;
2986                                         else {
2987                                                 bad_sectors -= (sector
2988                                                                 - first_bad);
2989                                                 if (max_sync > bad_sectors)
2990                                                         max_sync = bad_sectors;
2991                                                 continue;
2992                                         }
2993                                 }
2994                                 bio = r10_bio->devs[0].bio;
2995                                 bio->bi_next = biolist;
2996                                 biolist = bio;
2997                                 bio->bi_private = r10_bio;
2998                                 bio->bi_end_io = end_sync_read;
2999                                 bio->bi_rw = READ;
3000                                 from_addr = r10_bio->devs[j].addr;
3001                                 bio->bi_sector = from_addr + rdev->data_offset;
3002                                 bio->bi_bdev = rdev->bdev;
3003                                 atomic_inc(&rdev->nr_pending);
3004                                 /* and we write to 'i' (if not in_sync) */
3005
3006                                 for (k=0; k<conf->copies; k++)
3007                                         if (r10_bio->devs[k].devnum == i)
3008                                                 break;
3009                                 BUG_ON(k == conf->copies);
3010                                 to_addr = r10_bio->devs[k].addr;
3011                                 r10_bio->devs[0].devnum = d;
3012                                 r10_bio->devs[0].addr = from_addr;
3013                                 r10_bio->devs[1].devnum = i;
3014                                 r10_bio->devs[1].addr = to_addr;
3015
3016                                 rdev = mirror->rdev;
3017                                 if (!test_bit(In_sync, &rdev->flags)) {
3018                                         bio = r10_bio->devs[1].bio;
3019                                         bio->bi_next = biolist;
3020                                         biolist = bio;
3021                                         bio->bi_private = r10_bio;
3022                                         bio->bi_end_io = end_sync_write;
3023                                         bio->bi_rw = WRITE;
3024                                         bio->bi_sector = to_addr
3025                                                 + rdev->data_offset;
3026                                         bio->bi_bdev = rdev->bdev;
3027                                         atomic_inc(&r10_bio->remaining);
3028                                 } else
3029                                         r10_bio->devs[1].bio->bi_end_io = NULL;
3030
3031                                 /* and maybe write to replacement */
3032                                 bio = r10_bio->devs[1].repl_bio;
3033                                 if (bio)
3034                                         bio->bi_end_io = NULL;
3035                                 rdev = mirror->replacement;
3036                                 /* Note: if rdev != NULL, then bio
3037                                  * cannot be NULL as r10buf_pool_alloc will
3038                                  * have allocated it.
3039                                  * So the second test here is pointless.
3040                                  * But it keeps semantic-checkers happy, and
3041                                  * this comment keeps human reviewers
3042                                  * happy.
3043                                  */
3044                                 if (rdev == NULL || bio == NULL ||
3045                                     test_bit(Faulty, &rdev->flags))
3046                                         break;
3047                                 bio->bi_next = biolist;
3048                                 biolist = bio;
3049                                 bio->bi_private = r10_bio;
3050                                 bio->bi_end_io = end_sync_write;
3051                                 bio->bi_rw = WRITE;
3052                                 bio->bi_sector = to_addr + rdev->data_offset;
3053                                 bio->bi_bdev = rdev->bdev;
3054                                 atomic_inc(&r10_bio->remaining);
3055                                 break;
3056                         }
3057                         if (j == conf->copies) {
3058                                 /* Cannot recover, so abort the recovery or
3059                                  * record a bad block */
3060                                 put_buf(r10_bio);
3061                                 if (rb2)
3062                                         atomic_dec(&rb2->remaining);
3063                                 r10_bio = rb2;
3064                                 if (any_working) {
3065                                         /* problem is that there are bad blocks
3066                                          * on other device(s)
3067                                          */
3068                                         int k;
3069                                         for (k = 0; k < conf->copies; k++)
3070                                                 if (r10_bio->devs[k].devnum == i)
3071                                                         break;
3072                                         if (!test_bit(In_sync,
3073                                                       &mirror->rdev->flags)
3074                                             && !rdev_set_badblocks(
3075                                                     mirror->rdev,
3076                                                     r10_bio->devs[k].addr,
3077                                                     max_sync, 0))
3078                                                 any_working = 0;
3079                                         if (mirror->replacement &&
3080                                             !rdev_set_badblocks(
3081                                                     mirror->replacement,
3082                                                     r10_bio->devs[k].addr,
3083                                                     max_sync, 0))
3084                                                 any_working = 0;
3085                                 }
3086                                 if (!any_working)  {
3087                                         if (!test_and_set_bit(MD_RECOVERY_INTR,
3088                                                               &mddev->recovery))
3089                                                 printk(KERN_INFO "md/raid10:%s: insufficient "
3090                                                        "working devices for recovery.\n",
3091                                                        mdname(mddev));
3092                                         mirror->recovery_disabled
3093                                                 = mddev->recovery_disabled;
3094                                 }
3095                                 break;
3096                         }
3097                 }
3098                 if (biolist == NULL) {
3099                         while (r10_bio) {
3100                                 struct r10bio *rb2 = r10_bio;
3101                                 r10_bio = (struct r10bio*) rb2->master_bio;
3102                                 rb2->master_bio = NULL;
3103                                 put_buf(rb2);
3104                         }
3105                         goto giveup;
3106                 }
3107         } else {
3108                 /* resync. Schedule a read for every block at this virt offset */
3109                 int count = 0;
3110
3111                 bitmap_cond_end_sync(mddev->bitmap, sector_nr);
3112
3113                 if (!bitmap_start_sync(mddev->bitmap, sector_nr,
3114                                        &sync_blocks, mddev->degraded) &&
3115                     !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
3116                                                  &mddev->recovery)) {
3117                         /* We can skip this block */
3118                         *skipped = 1;
3119                         return sync_blocks + sectors_skipped;
3120                 }
3121                 if (sync_blocks < max_sync)
3122                         max_sync = sync_blocks;
3123                 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3124
3125                 r10_bio->mddev = mddev;
3126                 atomic_set(&r10_bio->remaining, 0);
3127                 raise_barrier(conf, 0);
3128                 conf->next_resync = sector_nr;
3129
3130                 r10_bio->master_bio = NULL;
3131                 r10_bio->sector = sector_nr;
3132                 set_bit(R10BIO_IsSync, &r10_bio->state);
3133                 raid10_find_phys(conf, r10_bio);
3134                 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3135
3136                 for (i = 0; i < conf->copies; i++) {
3137                         int d = r10_bio->devs[i].devnum;
3138                         sector_t first_bad, sector;
3139                         int bad_sectors;
3140
3141                         if (r10_bio->devs[i].repl_bio)
3142                                 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3143
3144                         bio = r10_bio->devs[i].bio;
3145                         bio->bi_end_io = NULL;
3146                         clear_bit(BIO_UPTODATE, &bio->bi_flags);
3147                         if (conf->mirrors[d].rdev == NULL ||
3148                             test_bit(Faulty, &conf->mirrors[d].rdev->flags))
3149                                 continue;
3150                         sector = r10_bio->devs[i].addr;
3151                         if (is_badblock(conf->mirrors[d].rdev,
3152                                         sector, max_sync,
3153                                         &first_bad, &bad_sectors)) {
3154                                 if (first_bad > sector)
3155                                         max_sync = first_bad - sector;
3156                                 else {
3157                                         bad_sectors -= (sector - first_bad);
3158                                         if (max_sync > bad_sectors)
3159                                                 max_sync = bad_sectors;
3160                                         continue;
3161                                 }
3162                         }
3163                         atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3164                         atomic_inc(&r10_bio->remaining);
3165                         bio->bi_next = biolist;
3166                         biolist = bio;
3167                         bio->bi_private = r10_bio;
3168                         bio->bi_end_io = end_sync_read;
3169                         bio->bi_rw = READ;
3170                         bio->bi_sector = sector +
3171                                 conf->mirrors[d].rdev->data_offset;
3172                         bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3173                         count++;
3174
3175                         if (conf->mirrors[d].replacement == NULL ||
3176                             test_bit(Faulty,
3177                                      &conf->mirrors[d].replacement->flags))
3178                                 continue;
3179
3180                         /* Need to set up for writing to the replacement */
3181                         bio = r10_bio->devs[i].repl_bio;
3182                         clear_bit(BIO_UPTODATE, &bio->bi_flags);
3183
3184                         sector = r10_bio->devs[i].addr;
3185                         atomic_inc(&conf->mirrors[d].rdev->nr_pending);
3186                         bio->bi_next = biolist;
3187                         biolist = bio;
3188                         bio->bi_private = r10_bio;
3189                         bio->bi_end_io = end_sync_write;
3190                         bio->bi_rw = WRITE;
3191                         bio->bi_sector = sector +
3192                                 conf->mirrors[d].replacement->data_offset;
3193                         bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3194                         count++;
3195                 }
3196
3197                 if (count < 2) {
3198                         for (i=0; i<conf->copies; i++) {
3199                                 int d = r10_bio->devs[i].devnum;
3200                                 if (r10_bio->devs[i].bio->bi_end_io)
3201                                         rdev_dec_pending(conf->mirrors[d].rdev,
3202                                                          mddev);
3203                                 if (r10_bio->devs[i].repl_bio &&
3204                                     r10_bio->devs[i].repl_bio->bi_end_io)
3205                                         rdev_dec_pending(
3206                                                 conf->mirrors[d].replacement,
3207                                                 mddev);
3208                         }
3209                         put_buf(r10_bio);
3210                         biolist = NULL;
3211                         goto giveup;
3212                 }
3213         }
3214
3215         for (bio = biolist; bio ; bio=bio->bi_next) {
3216
3217                 bio->bi_flags &= ~(BIO_POOL_MASK - 1);
3218                 if (bio->bi_end_io)
3219                         bio->bi_flags |= 1 << BIO_UPTODATE;
3220                 bio->bi_vcnt = 0;
3221                 bio->bi_idx = 0;
3222                 bio->bi_phys_segments = 0;
3223                 bio->bi_size = 0;
3224         }
3225
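        /* Add pages to every bio on the list in lockstep, one page at a
         * time, so all copies describe exactly the same range.  If any
         * bio cannot take another page, back that page out of the bios
         * already extended this round and stop at bio_full.
         */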
3226         nr_sectors = 0;
3227         if (sector_nr + max_sync < max_sector)
3228                 max_sector = sector_nr + max_sync;
3229         do {
3230                 struct page *page;
3231                 int len = PAGE_SIZE;
3232                 if (sector_nr + (len>>9) > max_sector)
3233                         len = (max_sector - sector_nr) << 9;
3234                 if (len == 0)
3235                         break;
3236                 for (bio= biolist ; bio ; bio=bio->bi_next) {
3237                         struct bio *bio2;
3238                         page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
3239                         if (bio_add_page(bio, page, len, 0))
3240                                 continue;
3241
3242                         /* stop here */
3243                         bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
3244                         for (bio2 = biolist;
3245                              bio2 && bio2 != bio;
3246                              bio2 = bio2->bi_next) {
3247                                 /* remove last page from this bio */
3248                                 bio2->bi_vcnt--;
3249                                 bio2->bi_size -= len;
3250                                 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3251                         }
3252                         goto bio_full;
3253                 }
3254                 nr_sectors += len>>9;
3255                 sector_nr += len>>9;
3256         } while (biolist->bi_vcnt < RESYNC_PAGES);
3257  bio_full:
3258         r10_bio->sectors = nr_sectors;
3259
3260         while (biolist) {
3261                 bio = biolist;
3262                 biolist = biolist->bi_next;
3263
3264                 bio->bi_next = NULL;
3265                 r10_bio = bio->bi_private;
3266                 r10_bio->sectors = nr_sectors;
3267
3268                 if (bio->bi_end_io == end_sync_read) {
3269                         md_sync_acct(bio->bi_bdev, nr_sectors);
3270                         generic_make_request(bio);
3271                 }
3272         }
3273
3274         if (sectors_skipped)
3275                 /* pretend they weren't skipped, it makes
3276                  * no important difference in this case
3277                  */
3278                 md_done_sync(mddev, sectors_skipped, 1);
3279
3280         return sectors_skipped + nr_sectors;
3281  giveup:
3282         /* There is nowhere to write, so all non-sync
3283          * drives must be failed or in resync, all drives
3284          * have a bad block, so try the next chunk...
3285          */
3286         if (sector_nr + max_sync < max_sector)
3287                 max_sector = sector_nr + max_sync;
3288
3289         sectors_skipped += (max_sector - sector_nr);
3290         chunks_skipped ++;
3291         sector_nr = max_sector;
3292         goto skipped;
3293 }
3294
3295 static sector_t
3296 raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
3297 {
3298         sector_t size;
3299         struct r10conf *conf = mddev->private;
3300
3301         if (!raid_disks)
3302                 raid_disks = min(conf->geo.raid_disks,
3303                                  conf->prev.raid_disks);
3304         if (!sectors)
3305                 sectors = conf->dev_sectors;
3306
3307         size = sectors >> conf->geo.chunk_shift;
3308         sector_div(size, conf->geo.far_copies);
3309         size = size * raid_disks;
3310         sector_div(size, conf->geo.near_copies);
3311
3312         return size << conf->geo.chunk_shift;
3313 }
3314
3315 static void calc_sectors(struct r10conf *conf, sector_t size)
3316 {
3317         /* Calculate the number of sectors-per-device that will
3318          * actually be used, and set conf->dev_sectors and
3319          * conf->stride
3320          */
3321
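        /* A worked example with illustrative numbers: 4 devices in a
         * 'near-2' layout (near_copies=2, far_copies=1, copies=2) with
         * 100 chunks of space per device give 100*4/2 = 200 distinct
         * data chunks; "used chunks per device" is then
         * DIV_ROUND_UP(200*2, 4) = 100, which shifted back up by
         * chunk_shift becomes conf->dev_sectors.
         */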
3322         size = size >> conf->geo.chunk_shift;
3323         sector_div(size, conf->geo.far_copies);
3324         size = size * conf->geo.raid_disks;
3325         sector_div(size, conf->geo.near_copies);
3326         /* 'size' is now the number of chunks in the array */
3327         /* calculate "used chunks per device" */
3328         size = size * conf->copies;
3329
3330         /* We need to round up when dividing by raid_disks to
3331          * get the stride size.
3332          */
3333         size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);
3334
3335         conf->dev_sectors = size << conf->geo.chunk_shift;
3336
3337         if (conf->geo.far_offset)
3338                 conf->geo.stride = 1 << conf->geo.chunk_shift;
3339         else {
3340                 sector_div(size, conf->geo.far_copies);
3341                 conf->geo.stride = size << conf->geo.chunk_shift;
3342         }
3343 }
3344
3345 enum geo_type {geo_new, geo_old, geo_start};
3346 static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
3347 {
3348         int nc, fc, fo;
3349         int layout, chunk, disks;
3350         switch (new) {
3351         case geo_old:
3352                 layout = mddev->layout;
3353                 chunk = mddev->chunk_sectors;
3354                 disks = mddev->raid_disks - mddev->delta_disks;
3355                 break;
3356         case geo_new:
3357                 layout = mddev->new_layout;
3358                 chunk = mddev->new_chunk_sectors;
3359                 disks = mddev->raid_disks;
3360                 break;
3361         default: /* avoid 'may be unused' warnings */
3362         case geo_start: /* new when starting reshape - raid_disks not
3363                          * updated yet. */
3364                 layout = mddev->new_layout;
3365                 chunk = mddev->new_chunk_sectors;
3366                 disks = mddev->raid_disks + mddev->delta_disks;
3367                 break;
3368         }
3369         if (layout >> 17)
3370                 return -1;
3371         if (chunk < (PAGE_SIZE >> 9) ||
3372             !is_power_of_2(chunk))
3373                 return -2;
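        /* Decode the layout word; for instance the common 'n2' layout
         * (0x102) yields near_copies=2, far_copies=1, far_offset=0,
         * i.e. two copies of each chunk on adjacent devices.
         */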
3374         nc = layout & 255;
3375         fc = (layout >> 8) & 255;
3376         fo = layout & (1<<16);
3377         geo->raid_disks = disks;
3378         geo->near_copies = nc;
3379         geo->far_copies = fc;
3380         geo->far_offset = fo;
3381         geo->chunk_mask = chunk - 1;
3382         geo->chunk_shift = ffz(~chunk);
3383         return nc*fc;
3384 }
3385
3386 static struct r10conf *setup_conf(struct mddev *mddev)
3387 {
3388         struct r10conf *conf = NULL;
3389         int err = -EINVAL;
3390         struct geom geo;
3391         int copies;
3392
3393         copies = setup_geo(&geo, mddev, geo_new);
3394
3395         if (copies == -2) {
3396                 printk(KERN_ERR "md/raid10:%s: chunk size must be "
3397                        "at least PAGE_SIZE(%ld) and be a power of 2.\n",
3398                        mdname(mddev), PAGE_SIZE);
3399                 goto out;
3400         }
3401
3402         if (copies < 2 || copies > mddev->raid_disks) {
3403                 printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
3404                        mdname(mddev), mddev->new_layout);
3405                 goto out;
3406         }
3407
3408         err = -ENOMEM;
3409         conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
3410         if (!conf)
3411                 goto out;
3412
3413         /* FIXME calc properly */
3414         conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
3415                                                             max(0,mddev->delta_disks)),
3416                                 GFP_KERNEL);
3417         if (!conf->mirrors)
3418                 goto out;
3419
3420         conf->tmppage = alloc_page(GFP_KERNEL);
3421         if (!conf->tmppage)
3422                 goto out;
3423
3424         conf->geo = geo;
3425         conf->copies = copies;
3426         conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
3427                                            r10bio_pool_free, conf);
3428         if (!conf->r10bio_pool)
3429                 goto out;
3430
3431         calc_sectors(conf, mddev->dev_sectors);
3432         if (mddev->reshape_position == MaxSector) {
3433                 conf->prev = conf->geo;
3434                 conf->reshape_progress = MaxSector;
3435         } else {
3436                 if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
3437                         err = -EINVAL;
3438                         goto out;
3439                 }
3440                 conf->reshape_progress = mddev->reshape_position;
3441                 if (conf->prev.far_offset)
3442                         conf->prev.stride = 1 << conf->prev.chunk_shift;
3443                 else
3444                         /* far_copies must be 1 */
3445                         conf->prev.stride = conf->dev_sectors;
3446         }
3447         spin_lock_init(&conf->device_lock);
3448         INIT_LIST_HEAD(&conf->retry_list);
3449
3450         spin_lock_init(&conf->resync_lock);
3451         init_waitqueue_head(&conf->wait_barrier);
3452
3453         conf->thread = md_register_thread(raid10d, mddev, "raid10");
3454         if (!conf->thread)
3455                 goto out;
3456
3457         conf->mddev = mddev;
3458         return conf;
3459
3460  out:
3461         if (err == -ENOMEM)
3462                 printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
3463                        mdname(mddev));
3464         if (conf) {
3465                 if (conf->r10bio_pool)
3466                         mempool_destroy(conf->r10bio_pool);
3467                 kfree(conf->mirrors);
3468                 safe_put_page(conf->tmppage);
3469                 kfree(conf);
3470         }
3471         return ERR_PTR(err);
3472 }
3473
3474 static int run(struct mddev *mddev)
3475 {
3476         struct r10conf *conf;
3477         int i, disk_idx, chunk_size;
3478         struct raid10_info *disk;
3479         struct md_rdev *rdev;
3480         sector_t size;
3481         sector_t min_offset_diff = 0;
3482         int first = 1;
3483
3484         if (mddev->private == NULL) {
3485                 conf = setup_conf(mddev);
3486                 if (IS_ERR(conf))
3487                         return PTR_ERR(conf);
3488                 mddev->private = conf;
3489         }
3490         conf = mddev->private;
3491         if (!conf)
3492                 goto out;
3493
3494         mddev->thread = conf->thread;
3495         conf->thread = NULL;
3496
3497         chunk_size = mddev->chunk_sectors << 9;
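        /* Advertise the chunk size as the minimum I/O size; the optimal
         * size is one full stripe of data: raid_disks/near_copies chunks
         * when near_copies divides raid_disks evenly, otherwise
         * raid_disks chunks.
         */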
3498         if (mddev->queue) {
3499                 blk_queue_io_min(mddev->queue, chunk_size);
3500                 if (conf->geo.raid_disks % conf->geo.near_copies)
3501                         blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
3502                 else
3503                         blk_queue_io_opt(mddev->queue, chunk_size *
3504                                          (conf->geo.raid_disks / conf->geo.near_copies));
3505         }
3506
3507         rdev_for_each(rdev, mddev) {
3508                 long long diff;
3509                 struct request_queue *q;
3510
3511                 disk_idx = rdev->raid_disk;
3512                 if (disk_idx < 0)
3513                         continue;
3514                 if (disk_idx >= conf->geo.raid_disks &&
3515                     disk_idx >= conf->prev.raid_disks)
3516                         continue;
3517                 disk = conf->mirrors + disk_idx;
3518
3519                 if (test_bit(Replacement, &rdev->flags)) {
3520                         if (disk->replacement)
3521                                 goto out_free_conf;
3522                         disk->replacement = rdev;
3523                 } else {
3524                         if (disk->rdev)
3525                                 goto out_free_conf;
3526                         disk->rdev = rdev;
3527                 }
3528                 q = bdev_get_queue(rdev->bdev);
3529                 if (q->merge_bvec_fn)
3530                         mddev->merge_check_needed = 1;
3531                 diff = (rdev->new_data_offset - rdev->data_offset);
3532                 if (!mddev->reshape_backwards)
3533                         diff = -diff;
3534                 if (diff < 0)
3535                         diff = 0;
3536                 if (first || diff < min_offset_diff)
3537                         min_offset_diff = diff;
3538
3539                 if (mddev->gendisk)
3540                         disk_stack_limits(mddev->gendisk, rdev->bdev,
3541                                           rdev->data_offset << 9);
3542
3543                 disk->head_position = 0;
3544         }
3545
3546         /* need to check that every block has at least one working mirror */
3547         if (!enough(conf, -1)) {
3548                 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
3549                        mdname(mddev));
3550                 goto out_free_conf;
3551         }
3552
3553         if (conf->reshape_progress != MaxSector) {
3554                 /* must ensure that shape change is supported */
3555                 if (conf->geo.far_copies != 1 &&
3556                     conf->geo.far_offset == 0)
3557                         goto out_free_conf;
3558                 if (conf->prev.far_copies != 1 &&
3559                     conf->prev.far_offset == 0)
3560                         goto out_free_conf;
3561         }
3562
3563         mddev->degraded = 0;
3564         for (i = 0;
3565              i < conf->geo.raid_disks
3566                      || i < conf->prev.raid_disks;
3567              i++) {
3568
3569                 disk = conf->mirrors + i;
3570
3571                 if (!disk->rdev && disk->replacement) {
3572                         /* The replacement is all we have - use it */
3573                         disk->rdev = disk->replacement;
3574                         disk->replacement = NULL;
3575                         clear_bit(Replacement, &disk->rdev->flags);
3576                 }
3577
3578                 if (!disk->rdev ||
3579                     !test_bit(In_sync, &disk->rdev->flags)) {
3580                         disk->head_position = 0;
3581                         mddev->degraded++;
3582                         if (disk->rdev)
3583                                 conf->fullsync = 1;
3584                 }
3585                 disk->recovery_disabled = mddev->recovery_disabled - 1;
3586         }
3587
3588         if (mddev->recovery_cp != MaxSector)
3589                 printk(KERN_NOTICE "md/raid10:%s: not clean"
3590                        " -- starting background reconstruction\n",
3591                        mdname(mddev));
3592         printk(KERN_INFO
3593                 "md/raid10:%s: active with %d out of %d devices\n",
3594                 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3595                 conf->geo.raid_disks);
3596         /*
3597          * Ok, everything is just fine now
3598          */
3599         mddev->dev_sectors = conf->dev_sectors;
3600         size = raid10_size(mddev, 0, 0);
3601         md_set_array_sectors(mddev, size);
3602         mddev->resync_max_sectors = size;
3603
3604         if (mddev->queue) {
3605                 int stripe = conf->geo.raid_disks *
3606                         ((mddev->chunk_sectors << 9) / PAGE_SIZE);
3607                 mddev->queue->backing_dev_info.congested_fn = raid10_congested;
3608                 mddev->queue->backing_dev_info.congested_data = mddev;
3609
3610                 /* Calculate max read-ahead size.
3611                  * We need to readahead at least twice a whole stripe....
3612                  * maybe...
3613                  */
3614                 stripe /= conf->geo.near_copies;
3615                 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
3616                         mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
3617                 blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
3618         }
3619
3620
3621         if (md_integrity_register(mddev))
3622                 goto out_free_conf;
3623
3624         if (conf->reshape_progress != MaxSector) {
3625                 unsigned long before_length, after_length;
3626
3627                 before_length = ((1 << conf->prev.chunk_shift) *
3628                                  conf->prev.far_copies);
3629                 after_length = ((1 << conf->geo.chunk_shift) *
3630                                 conf->geo.far_copies);
3631
3632                 if (max(before_length, after_length) > min_offset_diff) {
3633                         /* This cannot work */
3634                         printk("md/raid10: offset difference not enough to continue reshape\n");
3635                         goto out_free_conf;
3636                 }
3637                 conf->offset_diff = min_offset_diff;
3638
3639                 conf->reshape_safe = conf->reshape_progress;
3640                 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3641                 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3642                 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
3643                 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3644                 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3645                                                         "reshape");
3646         }
3647
3648         return 0;
3649
3650 out_free_conf:
3651         md_unregister_thread(&mddev->thread);
3652         if (conf->r10bio_pool)
3653                 mempool_destroy(conf->r10bio_pool);
3654         safe_put_page(conf->tmppage);
3655         kfree(conf->mirrors);
3656         kfree(conf);
3657         mddev->private = NULL;
3658 out:
3659         return -EIO;
3660 }
3661
3662 static int stop(struct mddev *mddev)
3663 {
3664         struct r10conf *conf = mddev->private;
3665
3666         raise_barrier(conf, 0);
3667         lower_barrier(conf);
3668
3669         md_unregister_thread(&mddev->thread);
3670         if (mddev->queue)
3671                 /* the unplug fn references 'conf'*/
3672                 blk_sync_queue(mddev->queue);
3673
3674         if (conf->r10bio_pool)
3675                 mempool_destroy(conf->r10bio_pool);
3676         kfree(conf->mirrors);
3677         kfree(conf);
3678         mddev->private = NULL;
3679         return 0;
3680 }
3681
3682 static void raid10_quiesce(struct mddev *mddev, int state)
3683 {
3684         struct r10conf *conf = mddev->private;
3685
3686         switch(state) {
3687         case 1:
3688                 raise_barrier(conf, 0);
3689                 break;
3690         case 0:
3691                 lower_barrier(conf);
3692                 break;
3693         }
3694 }
3695
3696 static int raid10_resize(struct mddev *mddev, sector_t sectors)
3697 {
3698         /* Resize of 'far' arrays is not supported.
3699          * For 'near' and 'offset' arrays we can set the
3700          * number of sectors used to be an appropriate multiple
3701          * of the chunk size.
3702          * For 'offset', this is far_copies*chunksize.
3703          * For 'near' the multiplier is the LCM of
3704          * near_copies and raid_disks.
3705          * So if far_copies > 1 && !far_offset, fail.
3706          * Else find LCM(raid_disks, near_copies)*far_copies and
3707          * multiply by chunk_size.  Then round to this number.
3708          * This is mostly done by raid10_size()
3709          */
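        /* For example (illustrative numbers): a 'near' array with
         * raid_disks=4, near_copies=2 and far_copies=1 has its usable
         * size rounded to a multiple of lcm(4,2) = 4 chunks, i.e.
         * 4*chunk_size sectors.
         */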
3710         struct r10conf *conf = mddev->private;
3711         sector_t oldsize, size;
3712
3713         if (mddev->reshape_position != MaxSector)
3714                 return -EBUSY;
3715
3716         if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
3717                 return -EINVAL;
3718
3719         oldsize = raid10_size(mddev, 0, 0);
3720         size = raid10_size(mddev, sectors, 0);
3721         if (mddev->external_size &&
3722             mddev->array_sectors > size)
3723                 return -EINVAL;
3724         if (mddev->bitmap) {
3725                 int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
3726                 if (ret)
3727                         return ret;
3728         }
3729         md_set_array_sectors(mddev, size);
3730         set_capacity(mddev->gendisk, mddev->array_sectors);
3731         revalidate_disk(mddev->gendisk);
3732         if (sectors > mddev->dev_sectors &&
3733             mddev->recovery_cp > oldsize) {
3734                 mddev->recovery_cp = oldsize;
3735                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3736         }
3737         calc_sectors(conf, sectors);
3738         mddev->dev_sectors = conf->dev_sectors;
3739         mddev->resync_max_sectors = size;
3740         return 0;
3741 }
3742
3743 static void *raid10_takeover_raid0(struct mddev *mddev)
3744 {
3745         struct md_rdev *rdev;
3746         struct r10conf *conf;
3747
3748         if (mddev->degraded > 0) {
3749                 printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
3750                        mdname(mddev));
3751                 return ERR_PTR(-EINVAL);
3752         }
3753
3754         /* Set new parameters */
3755         mddev->new_level = 10;
3756         /* new layout: far_copies = 1, near_copies = 2 */
3757         mddev->new_layout = (1<<8) + 2;
3758         mddev->new_chunk_sectors = mddev->chunk_sectors;
3759         mddev->delta_disks = mddev->raid_disks;
3760         mddev->raid_disks *= 2;
3761         /* make sure it will be not marked as dirty */
3762         mddev->recovery_cp = MaxSector;
3763
3764         conf = setup_conf(mddev);
3765         if (!IS_ERR(conf)) {
3766                 rdev_for_each(rdev, mddev)
3767                         if (rdev->raid_disk >= 0)
3768                                 rdev->new_raid_disk = rdev->raid_disk * 2;
3769                 conf->barrier = 1;
3770         }
3771
3772         return conf;
3773 }
3774
3775 static void *raid10_takeover(struct mddev *mddev)
3776 {
3777         struct r0conf *raid0_conf;
3778
3779         /* raid10 can take over:
3780          *  raid0 - providing it has only two drives
3781          */
3782         if (mddev->level == 0) {
3783                 /* for raid0 takeover only one zone is supported */
3784                 raid0_conf = mddev->private;
3785                 if (raid0_conf->nr_strip_zones > 1) {
3786                         printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
3787                                " with more than one zone.\n",
3788                                mdname(mddev));
3789                         return ERR_PTR(-EINVAL);
3790                 }
3791                 return raid10_takeover_raid0(mddev);
3792         }
3793         return ERR_PTR(-EINVAL);
3794 }
3795
3796 static int raid10_check_reshape(struct mddev *mddev)
3797 {
3798         /* Called when there is a request to change
3799          * - layout (to ->new_layout)
3800          * - chunk size (to ->new_chunk_sectors)
3801          * - raid_disks (by delta_disks)
3802          * or when trying to restart a reshape that was ongoing.
3803          *
3804          * We need to validate the request and possibly allocate
3805          * space if that might be an issue later.
3806          *
3807          * Currently we reject any reshape of a 'far' mode array,
3808          * allow the chunk size to change if the new value is acceptable,
3809          * allow raid_disks to increase, and allow
3810          * a switch between 'near' mode and 'offset' mode.
3811          */
3812         struct r10conf *conf = mddev->private;
3813         struct geom geo;
3814
3815         if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
3816                 return -EINVAL;
3817
3818         if (setup_geo(&geo, mddev, geo_start) != conf->copies)
3819                 /* mustn't change number of copies */
3820                 return -EINVAL;
3821         if (geo.far_copies > 1 && !geo.far_offset)
3822                 /* Cannot switch to 'far' mode */
3823                 return -EINVAL;
3824
3825         if (mddev->array_sectors & geo.chunk_mask)
3826                 /* not a factor of the array size */
3827                 return -EINVAL;
3828
3829         if (!enough(conf, -1))
3830                 return -EINVAL;
3831
3832         kfree(conf->mirrors_new);
3833         conf->mirrors_new = NULL;
3834         if (mddev->delta_disks > 0) {
3835                 /* allocate new 'mirrors' list */
3836                 conf->mirrors_new = kzalloc(
3837                         sizeof(struct raid10_info)
3838                         *(mddev->raid_disks +
3839                           mddev->delta_disks),
3840                         GFP_KERNEL);
3841                 if (!conf->mirrors_new)
3842                         return -ENOMEM;
3843         }
3844         return 0;
3845 }
3846
3847 /*
3848  * Need to check if array has failed when deciding whether to:
3849  *  - start an array
3850  *  - remove non-faulty devices
3851  *  - add a spare
3852  *  - allow a reshape
3853  * This determination is simple when no reshape is happening.
3854  * However if there is a reshape, we need to carefully check
3855  * both the before and after sections.
3856  * This is because some failed devices may only affect one
3857  * of the two sections, and some non-in_sync devices may
3858  * be in_sync in the section most affected by failed devices.
3859  */
3860 static int calc_degraded(struct r10conf *conf)
3861 {
3862         int degraded, degraded2;
3863         int i;
3864
3865         rcu_read_lock();
3866         degraded = 0;
3867         /* 'prev' section first */
3868         for (i = 0; i < conf->prev.raid_disks; i++) {
3869                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3870                 if (!rdev || test_bit(Faulty, &rdev->flags))
3871                         degraded++;
3872                 else if (!test_bit(In_sync, &rdev->flags))
3873                         /* When we can reduce the number of devices in
3874                          * an array, this might not contribute to
3875                          * 'degraded'.  It does now.
3876                          */
3877                         degraded++;
3878         }
3879         rcu_read_unlock();
3880         if (conf->geo.raid_disks == conf->prev.raid_disks)
3881                 return degraded;
3882         rcu_read_lock();
3883         degraded2 = 0;
3884         for (i = 0; i < conf->geo.raid_disks; i++) {
3885                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3886                 if (!rdev || test_bit(Faulty, &rdev->flags))
3887                         degraded2++;
3888                 else if (!test_bit(In_sync, &rdev->flags)) {
3889                         /* If reshape is increasing the number of devices,
3890                          * this section has already been recovered, so
3891                          * it doesn't contribute to degraded.
3892                          * else it does.
3893                          */
3894                         if (conf->geo.raid_disks <= conf->prev.raid_disks)
3895                                 degraded2++;
3896                 }
3897         }
3898         rcu_read_unlock();
3899         if (degraded2 > degraded)
3900                 return degraded2;
3901         return degraded;
3902 }
3903
3904 static int raid10_start_reshape(struct mddev *mddev)
3905 {
3906         /* A 'reshape' has been requested. This commits
3907          * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
3908          * This also checks if there are enough spares and adds them
3909          * to the array.
3910          * We currently require enough spares to make the final
3911          * array non-degraded.  We also require that the difference
3912          * between old and new data_offset - on each device - is
3913          * enough that we never risk over-writing.
3914          */
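        /* Worked example (editor's note, hypothetical numbers): reshaping
         * from 512-sector chunks with far_copies == 2 to 1024-sector chunks
         * with far_copies == 1 gives before_length == after_length == 1024
         * sectors, so every member device must have its old and new
         * data_offset at least 1024 sectors apart (in the direction of the
         * reshape, i.e. min_offset_diff >= 1024) or the request is rejected
         * below.
         */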
3915
3916         unsigned long before_length, after_length;
3917         sector_t min_offset_diff = 0;
3918         int first = 1;
3919         struct geom new;
3920         struct r10conf *conf = mddev->private;
3921         struct md_rdev *rdev;
3922         int spares = 0;
3923         int ret;
3924
3925         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
3926                 return -EBUSY;
3927
3928         if (setup_geo(&new, mddev, geo_start) != conf->copies)
3929                 return -EINVAL;
3930
3931         before_length = ((1 << conf->prev.chunk_shift) *
3932                          conf->prev.far_copies);
3933         after_length = ((1 << conf->geo.chunk_shift) *
3934                         conf->geo.far_copies);
3935
3936         rdev_for_each(rdev, mddev) {
3937                 if (!test_bit(In_sync, &rdev->flags)
3938                     && !test_bit(Faulty, &rdev->flags))
3939                         spares++;
3940                 if (rdev->raid_disk >= 0) {
3941                         long long diff = (rdev->new_data_offset
3942                                           - rdev->data_offset);
3943                         if (!mddev->reshape_backwards)
3944                                 diff = -diff;
3945                         if (diff < 0)
3946                                 diff = 0;
3947                         if (first || diff < min_offset_diff)
3948                                 min_offset_diff = diff;
3949                 }
3950         }
3951
3952         if (max(before_length, after_length) > min_offset_diff)
3953                 return -EINVAL;
3954
3955         if (spares < mddev->delta_disks)
3956                 return -EINVAL;
3957
3958         conf->offset_diff = min_offset_diff;
3959         spin_lock_irq(&conf->device_lock);
3960         if (conf->mirrors_new) {
3961                 memcpy(conf->mirrors_new, conf->mirrors,
3962                        sizeof(struct raid10_info)*conf->prev.raid_disks);
3963                 smp_mb();
3964                 kfree(conf->mirrors_old); /* FIXME and elsewhere */
3965                 conf->mirrors_old = conf->mirrors;
3966                 conf->mirrors = conf->mirrors_new;
3967                 conf->mirrors_new = NULL;
3968         }
3969         setup_geo(&conf->geo, mddev, geo_start);
3970         smp_mb();
3971         if (mddev->reshape_backwards) {
3972                 sector_t size = raid10_size(mddev, 0, 0);
3973                 if (size < mddev->array_sectors) {
3974                         spin_unlock_irq(&conf->device_lock);
3975                         printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
3976                                mdname(mddev));
3977                         return -EINVAL;
3978                 }
3979                 mddev->resync_max_sectors = size;
3980                 conf->reshape_progress = size;
3981         } else
3982                 conf->reshape_progress = 0;
3983         spin_unlock_irq(&conf->device_lock);
3984
3985         if (mddev->delta_disks && mddev->bitmap) {
3986                 ret = bitmap_resize(mddev->bitmap,
3987                                     raid10_size(mddev, 0,
3988                                                 conf->geo.raid_disks),
3989                                     0, 0);
3990                 if (ret)
3991                         goto abort;
3992         }
3993         if (mddev->delta_disks > 0) {
3994                 rdev_for_each(rdev, mddev)
3995                         if (rdev->raid_disk < 0 &&
3996                             !test_bit(Faulty, &rdev->flags)) {
3997                                 if (raid10_add_disk(mddev, rdev) == 0) {
3998                                         if (rdev->raid_disk >=
3999                                             conf->prev.raid_disks)
4000                                                 set_bit(In_sync, &rdev->flags);
4001                                         else
4002                                                 rdev->recovery_offset = 0;
4003
4004                                         if (sysfs_link_rdev(mddev, rdev))
4005                                                 /* Failure here  is OK */;
4006                                 }
4007                         } else if (rdev->raid_disk >= conf->prev.raid_disks
4008                                    && !test_bit(Faulty, &rdev->flags)) {
4009                                 /* This is a spare that was manually added */
4010                                 set_bit(In_sync, &rdev->flags);
4011                         }
4012         }
4013         /* When a reshape changes the number of devices,
4014          * ->degraded is measured against the larger of the
4015          * pre and  post numbers.
4016          */
4017         spin_lock_irq(&conf->device_lock);
4018         mddev->degraded = calc_degraded(conf);
4019         spin_unlock_irq(&conf->device_lock);
4020         mddev->raid_disks = conf->geo.raid_disks;
4021         mddev->reshape_position = conf->reshape_progress;
4022         set_bit(MD_CHANGE_DEVS, &mddev->flags);
4023
4024         clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4025         clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4026         set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
4027         set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4028
4029         mddev->sync_thread = md_register_thread(md_do_sync, mddev,
4030                                                 "reshape");
4031         if (!mddev->sync_thread) {
4032                 ret = -EAGAIN;
4033                 goto abort;
4034         }
4035         conf->reshape_checkpoint = jiffies;
4036         md_wakeup_thread(mddev->sync_thread);
4037         md_new_event(mddev);
4038         return 0;
4039
4040 abort:
4041         mddev->recovery = 0;
4042         spin_lock_irq(&conf->device_lock);
4043         conf->geo = conf->prev;
4044         mddev->raid_disks = conf->geo.raid_disks;
4045         rdev_for_each(rdev, mddev)
4046                 rdev->new_data_offset = rdev->data_offset;
4047         smp_wmb();
4048         conf->reshape_progress = MaxSector;
4049         mddev->reshape_position = MaxSector;
4050         spin_unlock_irq(&conf->device_lock);
4051         return ret;
4052 }
4053
4054 /* Calculate the last device-address that could contain
4055  * any block from the chunk that includes the array-address 's'
4056  * and report the next address.
4057  * i.e. the address returned will be chunk-aligned and after
4058  * any data that is in the chunk containing 's'.
4059  */
4060 static sector_t last_dev_address(sector_t s, struct geom *geo)
4061 {
4062         s = (s | geo->chunk_mask) + 1;
4063         s >>= geo->chunk_shift;
4064         s *= geo->near_copies;
4065         s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
4066         s *= geo->far_copies;
4067         s <<= geo->chunk_shift;
4068         return s;
4069 }
4070
4071 /* Calculate the first device-address that could contain
4072  * any block from the chunk that includes the array-address 's'.
4073  * This too will be the start of a chunk
4074  */
4075 static sector_t first_dev_address(sector_t s, struct geom *geo)
4076 {
4077         s >>= geo->chunk_shift;
4078         s *= geo->near_copies;
4079         sector_div(s, geo->raid_disks);
4080         s *= geo->far_copies;
4081         s <<= geo->chunk_shift;
4082         return s;
4083 }
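
/*
 * Editor's illustrative sketch (not part of the driver, kept under #if 0):
 * exercising the two helpers above with a hypothetical geometry of
 * raid_disks = 4, near_copies = 2, far_copies = 1 and 128-sector (64KiB)
 * chunks.  Array sector 1000 lies in array chunk 7, which is stored in
 * device chunk 3, so first_dev_address() returns 384 and
 * last_dev_address() returns 512.  The function name is hypothetical.
 */
#if 0
static void example_dev_address_bounds(void)
{
	struct geom g = {
		.raid_disks  = 4,
		.near_copies = 2,
		.far_copies  = 1,
		.chunk_shift = 7,	/* 128 sectors per chunk */
		.chunk_mask  = 127,
	};
	sector_t start = first_dev_address(1000, &g);	/* == 384 */
	sector_t end   = last_dev_address(1000, &g);	/* == 512 */

	printk(KERN_DEBUG "first=%llu last=%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
}
#endif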
4084
4085 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4086                                 int *skipped)
4087 {
4088         /* We simply copy at most one chunk (smallest of old and new)
4089          * at a time, possibly less if that exceeds RESYNC_PAGES,
4090          * or we hit a bad block or something.
4091          * This might mean we pause for normal IO in the middle of
4092          * a chunk, but that is not a problem as mddev->reshape_position
4093          * can record any location.
4094          *
4095          * If we will want to write to a location that isn't
4096          * yet recorded as 'safe' (i.e. in metadata on disk) then
4097          * we need to flush all reshape requests and update the metadata.
4098          *
4099          * When reshaping forwards (e.g. to more devices), we interpret
4100          * 'safe' as the earliest block which might not have been copied
4101          * down yet.  We divide this by previous stripe size and multiply
4102          * by previous stripe length to get the lowest device offset that we
4103          * cannot write to yet.
4104          * We interpret 'sector_nr' as an address that we want to write to.
4105          * From this we use last_dev_address() to find where we might
4106          * write to, and first_dev_address() on the 'safe' position.
4107          * If this 'next' write position is after the 'safe' position,
4108          * we must update the metadata to increase the 'safe' position.
4109          *
4110          * When reshaping backwards, we round in the opposite direction
4111          * and perform the reverse test:  next write position must not be
4112          * less than current safe position.
4113          *
4114          * In all this the minimum difference in data offsets
4115          * (conf->offset_diff - always positive) allows a bit of slack,
4116          * so next can be after 'safe', but not by more than offset_diff.
4117          *
4118          * We need to prepare all the bios here before we start any IO
4119          * to ensure the size we choose is acceptable to all devices.
4120          * That means one for each copy for write-out and an extra one for
4121          * read-in.
4122          * We store the read-in bio in ->master_bio and the others in
4123          * ->devs[x].bio and ->devs[x].repl_bio.
4124          */
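        /* Worked example (editor's note, hypothetical numbers): on a forward
         * reshape with conf->offset_diff == 1024 sectors, if the new layout
         * could write this chunk up to device address 5000 ('next') while
         * the recorded 'safe' position in the old layout is device address
         * 3500, then next > safe + offset_diff (5000 > 4500) and the
         * metadata must be updated before these writes are issued.
         */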
4125         struct r10conf *conf = mddev->private;
4126         struct r10bio *r10_bio;
4127         sector_t next, safe, last;
4128         int max_sectors;
4129         int nr_sectors;
4130         int s;
4131         struct md_rdev *rdev;
4132         int need_flush = 0;
4133         struct bio *blist;
4134         struct bio *bio, *read_bio;
4135         int sectors_done = 0;
4136
4137         if (sector_nr == 0) {
4138                 /* If restarting in the middle, skip the initial sectors */
4139                 if (mddev->reshape_backwards &&
4140                     conf->reshape_progress < raid10_size(mddev, 0, 0)) {
4141                         sector_nr = (raid10_size(mddev, 0, 0)
4142                                      - conf->reshape_progress);
4143                 } else if (!mddev->reshape_backwards &&
4144                            conf->reshape_progress > 0)
4145                         sector_nr = conf->reshape_progress;
4146                 if (sector_nr) {
4147                         mddev->curr_resync_completed = sector_nr;
4148                         sysfs_notify(&mddev->kobj, NULL, "sync_completed");
4149                         *skipped = 1;
4150                         return sector_nr;
4151                 }
4152         }
4153
4154         /* We don't use sector_nr to track where we are up to
4155          * as that doesn't work well for ->reshape_backwards.
4156          * So just use ->reshape_progress.
4157          */
4158         if (mddev->reshape_backwards) {
4159                 /* 'next' is the earliest device address that we might
4160                  * write to for this chunk in the new layout
4161                  */
4162                 next = first_dev_address(conf->reshape_progress - 1,
4163                                          &conf->geo);
4164
4165                 /* 'safe' is the last device address that we might read from
4166                  * in the old layout after a restart
4167                  */
4168                 safe = last_dev_address(conf->reshape_safe - 1,
4169                                         &conf->prev);
4170
4171                 if (next + conf->offset_diff < safe)
4172                         need_flush = 1;
4173
4174                 last = conf->reshape_progress - 1;
4175                 sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
4176                                                & conf->prev.chunk_mask);
4177                 if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
4178                         sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
4179         } else {
4180                 /* 'next' is after the last device address that we
4181                  * might write to for this chunk in the new layout
4182                  */
4183                 next = last_dev_address(conf->reshape_progress, &conf->geo);
4184
4185                 /* 'safe' is the earliest device address that we might
4186                  * read from in the old layout after a restart
4187                  */
4188                 safe = first_dev_address(conf->reshape_safe, &conf->prev);
4189
4190                 /* Need to update metadata if 'next' might be beyond 'safe'
4191                  * as that would possibly corrupt data
4192                  */
4193                 if (next > safe + conf->offset_diff)
4194                         need_flush = 1;
4195
4196                 sector_nr = conf->reshape_progress;
4197                 last  = sector_nr | (conf->geo.chunk_mask
4198                                      & conf->prev.chunk_mask);
4199
4200                 if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
4201                         last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
4202         }
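        /* Worked example (editor's note, hypothetical numbers): forward
         * reshape where both old and new chunks are 1024 sectors
         * (chunk_mask == 1023) and reshape_progress == 4096: 'last' starts
         * as 4096 | 1023 == 5119 but, assuming the usual 64KiB
         * RESYNC_BLOCK_SIZE (128 sectors), is capped to 4223, so this pass
         * copies sectors 4096..4223 of that chunk.
         */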
4203
4204         if (need_flush ||
4205             time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
4206                 /* Need to update reshape_position in metadata */
4207                 wait_barrier(conf);
4208                 mddev->reshape_position = conf->reshape_progress;
4209                 if (mddev->reshape_backwards)
4210                         mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
4211                                 - conf->reshape_progress;
4212                 else
4213                         mddev->curr_resync_completed = conf->reshape_progress;
4214                 conf->reshape_checkpoint = jiffies;
4215                 set_bit(MD_CHANGE_DEVS, &mddev->flags);
4216                 md_wakeup_thread(mddev->thread);
4217                 wait_event(mddev->sb_wait, mddev->flags == 0 ||
4218                            kthread_should_stop());
4219                 conf->reshape_safe = mddev->reshape_position;
4220                 allow_barrier(conf);
4221         }
4222
4223 read_more:
4224         /* Now schedule reads for blocks from sector_nr to last */
4225         r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4226         raise_barrier(conf, sectors_done != 0);
4227         atomic_set(&r10_bio->remaining, 0);
4228         r10_bio->mddev = mddev;
4229         r10_bio->sector = sector_nr;
4230         set_bit(R10BIO_IsReshape, &r10_bio->state);
4231         r10_bio->sectors = last - sector_nr + 1;
4232         rdev = read_balance(conf, r10_bio, &max_sectors);
4233         BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4234
4235         if (!rdev) {
4236                 /* Cannot read from here, so need to record bad blocks
4237                  * on all the target devices.
4238                  */
4239                 /* FIXME */
4240                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4241                 return sectors_done;
4242         }
4243
4244         read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4245
4246         read_bio->bi_bdev = rdev->bdev;
4247         read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4248                                + rdev->data_offset);
4249         read_bio->bi_private = r10_bio;
4250         read_bio->bi_end_io = end_sync_read;
4251         read_bio->bi_rw = READ;
4252         read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4253         read_bio->bi_flags |= 1 << BIO_UPTODATE;
4254         read_bio->bi_vcnt = 0;
4255         read_bio->bi_idx = 0;
4256         read_bio->bi_size = 0;
4257         r10_bio->master_bio = read_bio;
4258         r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4259
4260         /* Now find the locations in the new layout */
4261         __raid10_find_phys(&conf->geo, r10_bio);
4262
4263         blist = read_bio;
4264         read_bio->bi_next = NULL;
4265
4266         for (s = 0; s < conf->copies*2; s++) {
4267                 struct bio *b;
4268                 int d = r10_bio->devs[s/2].devnum;
4269                 struct md_rdev *rdev2;
4270                 if (s&1) {
4271                         rdev2 = conf->mirrors[d].replacement;
4272                         b = r10_bio->devs[s/2].repl_bio;
4273                 } else {
4274                         rdev2 = conf->mirrors[d].rdev;
4275                         b = r10_bio->devs[s/2].bio;
4276                 }
4277                 if (!rdev2 || test_bit(Faulty, &rdev2->flags))
4278                         continue;
4279                 b->bi_bdev = rdev2->bdev;
4280                 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
4281                 b->bi_private = r10_bio;
4282                 b->bi_end_io = end_reshape_write;
4283                 b->bi_rw = WRITE;
4284                 b->bi_flags &= ~(BIO_POOL_MASK - 1);
4285                 b->bi_flags |= 1 << BIO_UPTODATE;
4286                 b->bi_next = blist;
4287                 b->bi_vcnt = 0;
4288                 b->bi_idx = 0;
4289                 b->bi_size = 0;
4290                 blist = b;
4291         }
4292
4293         /* Now add as many pages as possible to all of these bios. */
4294
4295         nr_sectors = 0;
4296         for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
4297                 struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
4298                 int len = (max_sectors - s) << 9;
4299                 if (len > PAGE_SIZE)
4300                         len = PAGE_SIZE;
4301                 for (bio = blist; bio ; bio = bio->bi_next) {
4302                         struct bio *bio2;
4303                         if (bio_add_page(bio, page, len, 0))
4304                                 continue;
4305
4306                         /* Didn't fit, must stop */
4307                         for (bio2 = blist;
4308                              bio2 && bio2 != bio;
4309                              bio2 = bio2->bi_next) {
4310                                 /* Remove last page from this bio */
4311                                 bio2->bi_vcnt--;
4312                                 bio2->bi_size -= len;
4313                                 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4314                         }
4315                         goto bio_full;
4316                 }
4317                 sector_nr += len >> 9;
4318                 nr_sectors += len >> 9;
4319         }
4320 bio_full:
4321         r10_bio->sectors = nr_sectors;
4322
4323         /* Now submit the read */
4324         md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
4325         atomic_inc(&r10_bio->remaining);
4326         read_bio->bi_next = NULL;
4327         generic_make_request(read_bio);
4328         sector_nr += nr_sectors;
4329         sectors_done += nr_sectors;
4330         if (sector_nr <= last)
4331                 goto read_more;
4332
4333         /* Now that we have done the whole section we can
4334          * update reshape_progress
4335          */
4336         if (mddev->reshape_backwards)
4337                 conf->reshape_progress -= sectors_done;
4338         else
4339                 conf->reshape_progress += sectors_done;
4340
4341         return sectors_done;
4342 }
4343
4344 static void end_reshape_request(struct r10bio *r10_bio);
4345 static int handle_reshape_read_error(struct mddev *mddev,
4346                                      struct r10bio *r10_bio);
4347 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4348 {
4349         /* Reshape read completed.  Hopefully we have a block
4350          * to write out.
4351          * If we got a read error then we do sync 1-page reads from
4352          * elsewhere until we find the data - or give up.
4353          */
4354         struct r10conf *conf = mddev->private;
4355         int s;
4356
4357         if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4358                 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4359                         /* Reshape has been aborted */
4360                         md_done_sync(mddev, r10_bio->sectors, 0);
4361                         return;
4362                 }
4363
4364         /* We definitely have the data in the pages, schedule the
4365          * writes.
4366          */
4367         atomic_set(&r10_bio->remaining, 1);
4368         for (s = 0; s < conf->copies*2; s++) {
4369                 struct bio *b;
4370                 int d = r10_bio->devs[s/2].devnum;
4371                 struct md_rdev *rdev;
4372                 if (s&1) {
4373                         rdev = conf->mirrors[d].replacement;
4374                         b = r10_bio->devs[s/2].repl_bio;
4375                 } else {
4376                         rdev = conf->mirrors[d].rdev;
4377                         b = r10_bio->devs[s/2].bio;
4378                 }
4379                 if (!rdev || test_bit(Faulty, &rdev->flags))
4380                         continue;
4381                 atomic_inc(&rdev->nr_pending);
4382                 md_sync_acct(b->bi_bdev, r10_bio->sectors);
4383                 atomic_inc(&r10_bio->remaining);
4384                 b->bi_next = NULL;
4385                 generic_make_request(b);
4386         }
4387         end_reshape_request(r10_bio);
4388 }
4389
4390 static void end_reshape(struct r10conf *conf)
4391 {
4392         if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
4393                 return;
4394
4395         spin_lock_irq(&conf->device_lock);
4396         conf->prev = conf->geo;
4397         md_finish_reshape(conf->mddev);
4398         smp_wmb();
4399         conf->reshape_progress = MaxSector;
4400         spin_unlock_irq(&conf->device_lock);
4401
4402         /* read-ahead size must cover two whole stripes, which is
4403          * 2 * (datadisks) * chunksize, where 'datadisks' is raid_disks / near_copies
4404          */
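        /* Worked example (editor's note, hypothetical numbers): with 10
         * devices, near_copies == 2, 512KiB chunks and 4KiB pages, stripe
         * is 10 * 128 / 2 == 640 pages, so ra_pages is raised to at least
         * 1280 pages (5MiB) when it is currently smaller.
         */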
4405         if (conf->mddev->queue) {
4406                 int stripe = conf->geo.raid_disks *
4407                         ((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
4408                 stripe /= conf->geo.near_copies;
4409                 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
4410                         conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
4411         }
4412         conf->fullsync = 0;
4413 }
4414
4415
4416 static int handle_reshape_read_error(struct mddev *mddev,
4417                                      struct r10bio *r10_bio)
4418 {
4419         /* Use sync reads to get the blocks from somewhere else */
4420         int sectors = r10_bio->sectors;
4421         struct r10conf *conf = mddev->private;
4422         struct {
4423                 struct r10bio r10_bio;
4424                 struct r10dev devs[conf->copies];
4425         } on_stack;
4426         struct r10bio *r10b = &on_stack.r10_bio;
4427         int slot = 0;
4428         int idx = 0;
4429         struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;
4430
4431         r10b->sector = r10_bio->sector;
4432         __raid10_find_phys(&conf->prev, r10b);
4433
4434         while (sectors) {
4435                 int s = sectors;
4436                 int success = 0;
4437                 int first_slot = slot;
4438
4439                 if (s > (PAGE_SIZE >> 9))
4440                         s = PAGE_SIZE >> 9;
4441
4442                 while (!success) {
4443                         int d = r10b->devs[slot].devnum;
4444                         struct md_rdev *rdev = conf->mirrors[d].rdev;
4445                         sector_t addr;
4446                         if (rdev == NULL ||
4447                             test_bit(Faulty, &rdev->flags) ||
4448                             !test_bit(In_sync, &rdev->flags))
4449                                 goto failed;
4450
4451                         addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
4452                         success = sync_page_io(rdev,
4453                                                addr,
4454                                                s << 9,
4455                                                bvec[idx].bv_page,
4456                                                READ, false);
4457                         if (success)
4458                                 break;
4459                 failed:
4460                         slot++;
4461                         if (slot >= conf->copies)
4462                                 slot = 0;
4463                         if (slot == first_slot)
4464                                 break;
4465                 }
4466                 if (!success) {
4467                         /* couldn't read this block, must give up */
4468                         set_bit(MD_RECOVERY_INTR,
4469                                 &mddev->recovery);
4470                         return -EIO;
4471                 }
4472                 sectors -= s;
4473                 idx++;
4474         }
4475         return 0;
4476 }
4477
4478 static void end_reshape_write(struct bio *bio, int error)
4479 {
4480         int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
4481         struct r10bio *r10_bio = bio->bi_private;
4482         struct mddev *mddev = r10_bio->mddev;
4483         struct r10conf *conf = mddev->private;
4484         int d;
4485         int slot;
4486         int repl;
4487         struct md_rdev *rdev = NULL;
4488
4489         d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4490         if (repl)
4491                 rdev = conf->mirrors[d].replacement;
4492         if (!rdev) {
4493                 smp_mb();
4494                 rdev = conf->mirrors[d].rdev;
4495         }
4496
4497         if (!uptodate) {
4498                 /* FIXME should record badblock */
4499                 md_error(mddev, rdev);
4500         }
4501
4502         rdev_dec_pending(rdev, mddev);
4503         end_reshape_request(r10_bio);
4504 }
4505
4506 static void end_reshape_request(struct r10bio *r10_bio)
4507 {
4508         if (!atomic_dec_and_test(&r10_bio->remaining))
4509                 return;
4510         md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
4511         bio_put(r10_bio->master_bio);
4512         put_buf(r10_bio);
4513 }
4514
4515 static void raid10_finish_reshape(struct mddev *mddev)
4516 {
4517         struct r10conf *conf = mddev->private;
4518
4519         if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
4520                 return;
4521
4522         if (mddev->delta_disks > 0) {
4523                 sector_t size = raid10_size(mddev, 0, 0);
4524                 md_set_array_sectors(mddev, size);
4525                 if (mddev->recovery_cp > mddev->resync_max_sectors) {
4526                         mddev->recovery_cp = mddev->resync_max_sectors;
4527                         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4528                 }
4529                 mddev->resync_max_sectors = size;
4530                 set_capacity(mddev->gendisk, mddev->array_sectors);
4531                 revalidate_disk(mddev->gendisk);
4532         } else {
4533                 int d;
4534                 for (d = conf->geo.raid_disks ;
4535                      d < conf->geo.raid_disks - mddev->delta_disks;
4536                      d++) {
4537                         struct md_rdev *rdev = conf->mirrors[d].rdev;
4538                         if (rdev)
4539                                 clear_bit(In_sync, &rdev->flags);
4540                         rdev = conf->mirrors[d].replacement;
4541                         if (rdev)
4542                                 clear_bit(In_sync, &rdev->flags);
4543                 }
4544         }
4545         mddev->layout = mddev->new_layout;
4546         mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
4547         mddev->reshape_position = MaxSector;
4548         mddev->delta_disks = 0;
4549         mddev->reshape_backwards = 0;
4550 }
4551
4552 static struct md_personality raid10_personality =
4553 {
4554         .name           = "raid10",
4555         .level          = 10,
4556         .owner          = THIS_MODULE,
4557         .make_request   = make_request,
4558         .run            = run,
4559         .stop           = stop,
4560         .status         = status,
4561         .error_handler  = error,
4562         .hot_add_disk   = raid10_add_disk,
4563         .hot_remove_disk= raid10_remove_disk,
4564         .spare_active   = raid10_spare_active,
4565         .sync_request   = sync_request,
4566         .quiesce        = raid10_quiesce,
4567         .size           = raid10_size,
4568         .resize         = raid10_resize,
4569         .takeover       = raid10_takeover,
4570         .check_reshape  = raid10_check_reshape,
4571         .start_reshape  = raid10_start_reshape,
4572         .finish_reshape = raid10_finish_reshape,
4573 };
4574
4575 static int __init raid_init(void)
4576 {
4577         return register_md_personality(&raid10_personality);
4578 }
4579
4580 static void raid_exit(void)
4581 {
4582         unregister_md_personality(&raid10_personality);
4583 }
4584
4585 module_init(raid_init);
4586 module_exit(raid_exit);
4587 MODULE_LICENSE("GPL");
4588 MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
4589 MODULE_ALIAS("md-personality-9"); /* RAID10 */
4590 MODULE_ALIAS("md-raid10");
4591 MODULE_ALIAS("md-level-10");
4592
4593 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);