diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d9d031ede4bf5d73993a0fc607fab4274627c890..515554c7365b19400e6bbd0f8c2672c12e77feab 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -90,6 +90,8 @@ static void r1bio_pool_free(void *r1_bio, void *data)
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
 #define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
+#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
+#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)
 #define NEXT_NORMALIO_DISTANCE (3 * RESYNC_WINDOW_SECTORS)
 
 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
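
A quick check of the new window arithmetic, assuming the values defined earlier in raid1.c (RESYNC_BLOCK_SIZE = 64*1024 and RESYNC_DEPTH = 32, neither shown in this hunk):

    /* RESYNC_WINDOW                 = 64 KiB * 32  =  2 MiB
     * CLUSTER_RESYNC_WINDOW         = 16 * 2 MiB   = 32 MiB
     * CLUSTER_RESYNC_WINDOW_SECTORS = 32 MiB >> 9  = 65536 sectors
     */
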
@@ -1587,9 +1589,21 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
        if (mddev->recovery_disabled == conf->recovery_disabled)
                return -EBUSY;
 
+       if (md_integrity_add_rdev(rdev, mddev))
+               return -ENXIO;
+
        if (rdev->raid_disk >= 0)
                first = last = rdev->raid_disk;
 
+       /*
+        * find the disk ... but prefer rdev->saved_raid_disk
+        * if possible.
+        */
+       if (rdev->saved_raid_disk >= 0 &&
+           rdev->saved_raid_disk >= first &&
+           conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
+               first = last = rdev->saved_raid_disk;
+
        for (mirror = first; mirror <= last; mirror++) {
                p = conf->mirrors+mirror;
                if (!p->rdev) {
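
The new block collapses the [first, last] search range onto the slot the device held before it failed, when that slot is still free; landing a re-added device in its old slot is what allows recovery from the write-intent bitmap instead of a full resync. The selection logic in isolation, as a minimal sketch ('pick_slot', 'slots' and 'saved' are hypothetical stand-ins for the raid1 structures):

    /* Sketch only: 'slots' stands in for conf->mirrors, 'saved' for
     * rdev->saved_raid_disk. */
    static int pick_slot(void **slots, int first, int last, int saved)
    {
        int i;

        /* Prefer the slot the device occupied before it failed. */
        if (saved >= 0 && saved >= first && saved <= last &&
            slots[saved] == NULL)
            first = last = saved;

        for (i = first; i <= last; i++)
            if (slots[i] == NULL)
                return i;
        return -1;    /* no free slot */
    }
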
@@ -1621,7 +1635,6 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
                        break;
                }
        }
-       md_integrity_add_rdev(rdev, mddev);
        if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
                queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
        print_conf(conf);
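
Together with the hunk above, this moves md_integrity_add_rdev() from an ignored call at the end of raid1_add_disk() to a checked call made before any mirror slot is claimed: an rdev whose integrity profile cannot be merged is now rejected with -ENXIO while conf is still untouched. The ordering pattern in isolation, a minimal sketch in which check_profile() and claim_slot() are hypothetical stand-ins for md_integrity_add_rdev() and the mirror-slot bookkeeping:

    static int check_profile(void *array, void *member) { return 0; }
    static void claim_slot(void *array, void *member) { }

    static int add_member(void *array, void *member)
    {
        int err = check_profile(array, member);    /* fallible step first */

        if (err)
            return err;            /* nothing to roll back */
        claim_slot(array, member); /* mutate state only after checks pass */
        return 0;
    }
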
@@ -2261,6 +2274,7 @@ static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
        if (fail) {
                spin_lock_irq(&conf->device_lock);
                list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
+               conf->nr_queued++;
                spin_unlock_irq(&conf->device_lock);
                md_wakeup_thread(conf->mddev->thread);
        } else {
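
The increment matters because freeze_array(conf, extra) in this file waits for

    conf->nr_pending == conf->nr_queued + extra

and an r1_bio parked on bio_end_io_list still counts toward nr_pending. Unless it is also counted in nr_queued, the equality can never be reached and freeze_array() hangs; this increment and the matching decrement in the next hunk keep the two counters consistent.
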
@@ -2378,8 +2392,10 @@ static void raid1d(struct md_thread *thread)
                LIST_HEAD(tmp);
                spin_lock_irqsave(&conf->device_lock, flags);
                if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) {
-                       list_add(&tmp, &conf->bio_end_io_list);
-                       list_del_init(&conf->bio_end_io_list);
+                       while (!list_empty(&conf->bio_end_io_list)) {
+                               list_move(conf->bio_end_io_list.prev, &tmp);
+                               conf->nr_queued--;
+                       }
                }
                spin_unlock_irqrestore(&conf->device_lock, flags);
                while (!list_empty(&tmp)) {
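
The bulk splice (list_add() of tmp followed by list_del_init() of the list head) becomes a per-entry loop for two reasons: each removed r1_bio must also drop conf->nr_queued under the lock, and moving bio_end_io_list.prev takes entries tail-first, so tmp ends up in the same order the original list had. The idiom in isolation, assuming only <linux/list.h> (the names are hypothetical):

    /* Caller holds the lock protecting 'queue' and 'counter'. */
    static void drain_queue(struct list_head *queue, struct list_head *tmp,
                            int *counter)
    {
        while (!list_empty(queue)) {
            list_move(queue->prev, tmp);    /* tail first */
            (*counter)--;                   /* account per entry */
        }
    }
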
@@ -2493,6 +2509,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
 
                bitmap_close_sync(mddev->bitmap);
                close_sync(conf);
+
+               if (mddev_is_clustered(mddev)) {
+                       conf->cluster_sync_low = 0;
+                       conf->cluster_sync_high = 0;
+               }
                return 0;
        }
 
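
Clearing cluster_sync_low/high once the resync has finished (or been aborted) resets the node's local record of the window it had been advertising, so the next resync starts from a clean slate rather than comparing against a stale cluster_sync_high.
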
@@ -2513,7 +2534,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
                return sync_blocks;
        }
 
-       bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+       /* we are incrementing sector_nr below. To be safe, we check against
+        * sector_nr + two times RESYNC_SECTORS
+        */
+
+       bitmap_cond_end_sync(mddev->bitmap, sector_nr,
+               mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
        r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
 
        raise_barrier(conf, sector_nr);
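
The new third argument to bitmap_cond_end_sync() is a force flag added elsewhere in this series: it commits the bitmap's sync point even when the usual conditions would not. A worked check on the guard, assuming RESYNC_SECTORS = 128 (from RESYNC_BLOCK_SIZE = 64 KiB defined earlier in the file):

    /* sector_nr can advance by up to RESYNC_SECTORS in one pass, so
     * testing sector_nr + 2 * RESYNC_SECTORS (256 sectors of slack)
     * forces the update before the node runs past cluster_sync_high,
     * the top of the window it has advertised to the other nodes. */
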
@@ -2704,6 +2730,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
  bio_full:
        r1_bio->sectors = nr_sectors;
 
+       if (mddev_is_clustered(mddev) &&
+                       conf->cluster_sync_high < sector_nr + nr_sectors) {
+               conf->cluster_sync_low = mddev->curr_resync_completed;
+               conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
+               /* Send resync message */
+               md_cluster_ops->resync_info_update(mddev,
+                               conf->cluster_sync_low,
+                               conf->cluster_sync_high);
+       }
+
        /* For a user-requested sync, we read all readable devices and do a
         * compare
         */
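
This is the core of the clustered resync window: whenever the pass that just completed would step past cluster_sync_high, the window slides forward and is broadcast through md_cluster_ops->resync_info_update(), letting peer nodes delay writes that land inside it. A worked example, assuming CLUSTER_RESYNC_WINDOW_SECTORS = 65536 from the arithmetic above:

    /* If curr_resync_completed is 131072 when the window is first
     * exceeded, the node advertises [131072, 196608) and sends no
     * further message until resync crosses sector 196608: one
     * cluster message per 32 MiB of resync instead of one per pass. */
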
@@ -3018,9 +3054,11 @@ static int raid1_reshape(struct mddev *mddev)
                return -EINVAL;
        }
 
-       err = md_allow_write(mddev);
-       if (err)
-               return err;
+       if (!mddev_is_clustered(mddev)) {
+               err = md_allow_write(mddev);
+               if (err)
+                       return err;
+       }
 
        raid_disks = mddev->raid_disks + mddev->delta_disks;
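
md_allow_write() ensures the array is writable, which can involve a node-local superblock update if the array was read-auto; the reshape path now skips that step for clustered arrays, presumably because metadata transitions on a clustered array are coordinated through the md-cluster module rather than issued directly from here.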